DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 01/23] common/cnxk: fix part value for cn10k
@ 2022-08-09 18:48 Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
                   ` (24 more replies)
  0 siblings, 25 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Update the logic for getting the part and pass values for the cn10k
family, as the device-tree compatible logic does not work in VMs.
Scan all PCI devices and detect the first RVU device; its subsystem
device file gives the part number and its revision file provides the
pass information.

Fixes: 014a9e222bac ("common/cnxk: add model init and IO handling API")

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---

Depends-on: series-23650("[v2] event/cnxk: add eth port specific PTP enable")
Depends-on: series-24029("[1/4] cnxk/net: add fc check in vector event Tx path")

 drivers/common/cnxk/roc_model.c    | 152 +++++++++++++++++++++++++++----------
 drivers/common/cnxk/roc_platform.h |   3 +
 2 files changed, 113 insertions(+), 42 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index a68baa6..791ffa6 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include <dirent.h>
 #include <fcntl.h>
 #include <unistd.h>
 
@@ -40,6 +41,16 @@ struct roc_model *roc_model;
 #define MODEL_MINOR_SHIFT 0
 #define MODEL_MINOR_MASK  ((1 << MODEL_MINOR_BITS) - 1)
 
+#define MODEL_CN10K_PART_SHIFT	8
+#define MODEL_CN10K_PASS_BITS	4
+#define MODEL_CN10K_PASS_MASK	((1 << MODEL_CN10K_PASS_BITS) - 1)
+#define MODEL_CN10K_MAJOR_BITS	2
+#define MODEL_CN10K_MAJOR_SHIFT 2
+#define MODEL_CN10K_MAJOR_MASK	((1 << MODEL_CN10K_MAJOR_BITS) - 1)
+#define MODEL_CN10K_MINOR_BITS	2
+#define MODEL_CN10K_MINOR_SHIFT 0
+#define MODEL_CN10K_MINOR_MASK	((1 << MODEL_CN10K_MINOR_BITS) - 1)
+
 static const struct model_db {
 	uint32_t impl;
 	uint32_t part;
@@ -66,55 +77,101 @@ static const struct model_db {
 	{VENDOR_CAVIUM, PART_95xxMM, 0, 0, ROC_MODEL_CNF95xxMM_A0,
 	 "cnf95xxmm_a0"}};
 
-static uint32_t
-cn10k_part_get(void)
+/* Detect if RVU device */
+static bool
+is_rvu_device(unsigned long val)
 {
-	uint32_t soc = 0x0;
-	char buf[BUFSIZ];
-	char *ptr;
-	FILE *fd;
-
-	/* Read the CPU compatible variant */
-	fd = fopen("/proc/device-tree/compatible", "r");
-	if (!fd) {
-		plt_err("Failed to open /proc/device-tree/compatible");
-		goto err;
-	}
+	return (val == PCI_DEVID_CNXK_RVU_PF || val == PCI_DEVID_CNXK_RVU_VF ||
+		val == PCI_DEVID_CNXK_RVU_AF ||
+		val == PCI_DEVID_CNXK_RVU_AF_VF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_PF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_VF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_PF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_VF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_PF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_VF);
+}
 
-	if (fgets(buf, sizeof(buf), fd) == NULL) {
-		plt_err("Failed to read from /proc/device-tree/compatible");
-		goto fclose;
-	}
-	ptr = strchr(buf, ',');
-	if (!ptr) {
-		plt_err("Malformed 'CPU compatible': <%s>", buf);
-		goto fclose;
-	}
-	ptr++;
-	if (strcmp("cn10ka", ptr) == 0) {
-		soc = PART_106xx;
-	} else if (strcmp("cnf10ka", ptr) == 0) {
-		soc = PART_105xx;
-	} else if (strcmp("cnf10kb", ptr) == 0) {
-		soc = PART_105xxN;
-	} else if (strcmp("cn10kb", ptr) == 0) {
-		soc = PART_103xx;
-	} else {
-		plt_err("Unidentified 'CPU compatible': <%s>", ptr);
-		goto fclose;
+static int
+rvu_device_lookup(const char *dirname, uint32_t *part, uint32_t *pass)
+{
+	char filename[PATH_MAX];
+	unsigned long val;
+
+	/* Check if vendor id is cavium */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	if (val != PCI_VENDOR_ID_CAVIUM)
+		goto error;
+
+	/* Get device id  */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	/* Check if device ID belongs to any RVU device */
+	if (!is_rvu_device(val))
+		goto error;
+
+	/* Get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*part = val >> MODEL_CN10K_PART_SHIFT;
+
+	/* Get revision for pass value*/
+	snprintf(filename, sizeof(filename), "%s/revision", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*pass = val & MODEL_CN10K_PASS_MASK;
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
+/* Scans through all PCI devices, detects RVU device and returns
+ * subsystem_device
+ */
+static int
+cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
+{
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+	char dirname[PATH_MAX];
+	struct dirent *e;
+	DIR *dir;
+
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL) {
+		plt_err("%s(): opendir failed: %s\n", __func__,
+			strerror(errno));
+		return -errno;
 	}
 
-fclose:
-	fclose(fd);
+	while ((e = readdir(dir)) != NULL) {
+		if (e->d_name[0] == '.')
+			continue;
+
+		snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
+			 e->d_name);
+
+		/* Lookup for rvu device and get part pass information */
+		if (!rvu_device_lookup(dirname, part, pass))
+			break;
+	}
 
-err:
-	return soc;
+	closedir(dir);
+	return 0;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor;
+	uint32_t impl, major, part, minor, pass;
 	bool found = false;
 	size_t i;
 
@@ -124,8 +181,19 @@ populate_model(struct roc_model *model, uint32_t midr)
 	minor = (midr >> MODEL_MINOR_SHIFT) & MODEL_MINOR_MASK;
 
 	/* Update part number for cn10k from device-tree */
-	if (part == SOC_PART_CN10K)
-		part = cn10k_part_get();
+	if (part == SOC_PART_CN10K) {
+		if (cn10k_part_pass_get(&part, &pass))
+			goto not_found;
+		/*
+		 * Pass value format:
+		 * Bits 0..1: minor pass
+		 * Bits 3..2: major pass
+		 */
+		minor = (pass >> MODEL_CN10K_MINOR_SHIFT) &
+			MODEL_CN10K_MINOR_MASK;
+		major = (pass >> MODEL_CN10K_MAJOR_SHIFT) &
+			MODEL_CN10K_MAJOR_MASK;
+	}
 
 	for (i = 0; i < PLT_DIM(model_db); i++)
 		if (model_db[i].impl == impl && model_db[i].part == part &&
@@ -136,7 +204,7 @@ populate_model(struct roc_model *model, uint32_t midr)
 			found = true;
 			break;
 		}
-
+not_found:
 	if (!found) {
 		model->flag = 0;
 		strncpy(model->name, "unknown", ROC_MODEL_STR_LEN_MAX - 1);
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 502f243..3e7adfc 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -24,6 +24,8 @@
 #include <rte_tailq.h>
 #include <rte_telemetry.h>
 
+#include "eal_filesystem.h"
+
 #include "roc_bits.h"
 
 #if defined(__ARM_FEATURE_SVE)
@@ -94,6 +96,7 @@
 #define plt_pci_device		    rte_pci_device
 #define plt_pci_read_config	    rte_pci_read_config
 #define plt_pci_find_ext_capability rte_pci_find_ext_capability
+#define plt_sysfs_value_parse	    eal_parse_sysfs_value
 
 #define plt_log2_u32	 rte_log2_u32
 #define plt_cpu_to_be_16 rte_cpu_to_be_16
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 02/23] common/cnxk: add cn10ka A1 platform
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
                   ` (23 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Adding support for cn10ka A1 pass

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index 791ffa6..b040bc0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -60,6 +60,7 @@ static const struct model_db {
 	char name[ROC_MODEL_STR_LEN_MAX];
 } model_db[] = {
 	{VENDOR_ARM, PART_106xx, 0, 0, ROC_MODEL_CN106xx_A0, "cn10ka_a0"},
+	{VENDOR_ARM, PART_106xx, 0, 1, ROC_MODEL_CN106xx_A1, "cn10ka_a1"},
 	{VENDOR_ARM, PART_105xx, 0, 0, ROC_MODEL_CNF105xx_A0, "cnf10ka_a0"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 37c8a47..d231d44 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -25,6 +25,7 @@ struct roc_model {
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
 #define ROC_MODEL_CN103xx_A0   BIT_ULL(23)
+#define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -48,7 +49,7 @@ struct roc_model {
 	 ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CNF95xxN_A1 |                       \
 	 ROC_MODEL_CNF95xxN_B0)
 
-#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0)
+#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0)
 #define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
@@ -192,6 +193,12 @@ roc_model_is_cn10ka_a0(void)
 }
 
 static inline uint64_t
+roc_model_is_cn10ka_a1(void)
+{
+	return roc_model->flag & ROC_MODEL_CN106xx_A1;
+}
+
+static inline uint64_t
 roc_model_is_cnf10ka_a0(void)
 {
 	return roc_model->flag & ROC_MODEL_CNF105xx_A0;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
                   ` (22 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

Updates CPT inbound inline IPsec configuration mailbox
to provide opcode and CPT credit from VF.
This patch also adds mailbox for reading inbound IPsec
configuration.

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c   | 15 +++++++++++++++
 drivers/common/cnxk/roc_cpt.h   |  2 ++
 drivers/common/cnxk/roc_mbox.h  | 12 +++++++++---
 drivers/common/cnxk/version.map |  1 +
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index f1be6a3..d607bde 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -261,6 +261,21 @@ roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
 }
 
 int
+roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
+				  struct nix_inline_ipsec_cfg *inb_cfg)
+{
+	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
+	struct dev *dev = &cpt->dev;
+	struct msg_req *req;
+
+	req = mbox_alloc_msg_nix_read_inline_ipsec_cfg(dev->mbox);
+	if (req == NULL)
+		return -EIO;
+
+	return mbox_process_msg(dev->mbox, (void *)&inb_cfg);
+}
+
+int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 			     uint16_t param2)
 {
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index a3a65f1..4e3a078 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -158,6 +158,8 @@ int __roc_api roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr,
 int __roc_api roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr);
 int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 				       struct roc_nix *nix);
+int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
+	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
 					   uint16_t param1, uint16_t param2);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 965c704..912de11 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -263,7 +263,9 @@ struct mbox_msghdr {
 	  nix_bp_cfg_rsp)                                                      \
 	M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req,      \
 	  msg_rsp)                                                             \
-	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
+	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
+	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
+	  msg_req, nix_inline_ipsec_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1161,7 +1163,9 @@ struct nix_inline_ipsec_cfg {
 	uint32_t __io cpt_credit;
 	struct {
 		uint8_t __io egrp;
-		uint8_t __io opcode;
+		uint16_t __io opcode;
+		uint16_t __io param1;
+		uint16_t __io param2;
 	} gen_cfg;
 	struct {
 		uint16_t __io cpt_pf_func;
@@ -1465,7 +1469,9 @@ struct cpt_rx_inline_lf_cfg_msg {
 	uint16_t __io sso_pf_func;
 	uint16_t __io param1;
 	uint16_t __io param2;
-	uint16_t __io reserved;
+	uint16_t __io opcode;
+	uint32_t __io credit;
+	uint32_t __io reserved;
 };
 
 enum cpt_eng_type {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 019f531..a2d99e1 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -65,6 +65,7 @@ INTERNAL {
 	roc_cpt_dev_init;
 	roc_cpt_eng_grp_add;
 	roc_cpt_inline_ipsec_cfg;
+	roc_cpt_inline_ipsec_inb_cfg_read;
 	roc_cpt_inline_ipsec_inb_cfg;
 	roc_cpt_iq_disable;
 	roc_cpt_iq_enable;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
                   ` (21 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Fix missing fc wait for outbound path in vector mode.
Currently only poll mode has it.

Fixes: 358d02d20a2f ("net/cnxk: support flow control for outbound inline")
Cc: ndabilpuram@marvell.com

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 8056510..07c88a9 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1049,9 +1049,13 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -1199,9 +1203,13 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -2753,7 +2761,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		uint16_t sec_pkts = (c_lnum << 1) + c_loff;
+
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (2 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
                   ` (20 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Limit the meta aura workaround to CN10K A0.
Also make the other NIX and inline related erratas
applicable to CN10K A1.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h  |  7 +++++++
 drivers/common/cnxk/roc_nix_inl.c | 10 ++++++----
 drivers/net/cnxk/cnxk_ethdev.c    |  3 ++-
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index f048297..8dc372f 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -81,6 +81,13 @@ roc_errata_nix_has_perf_issue_on_stats_update(void)
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
 {
+	return roc_model_is_cn10ka_a0() || roc_model_is_cn10ka_a1();
+}
+
+/* IPBUNIXRX-40400 */
+static inline bool
+roc_errata_nix_no_meta_aura(void)
+{
 	return roc_model_is_cn10ka_a0();
 }
 
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 7da8938..603551b 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -627,18 +627,18 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 	inl_rq->first_skip = rq->first_skip;
 	inl_rq->later_skip = rq->later_skip;
 	inl_rq->lpb_size = rq->lpb_size;
-	inl_rq->lpb_drop_ena = true;
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
-	inl_rq->spb_drop_ena = !!rq->spb_ena;
 
-	if (!roc_model_is_cn9k()) {
+	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->lpb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
@@ -653,12 +653,14 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 		roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
 	}
 
-	if (inl_rq->spb_ena) {
+	if (roc_errata_nix_no_meta_aura() && inl_rq->spb_ena) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->spb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2418290..df20f27 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -617,7 +617,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->first_skip = first_skip;
 	rq->later_skip = sizeof(struct rte_mbuf);
 	rq->lpb_size = mp->elt_size;
-	rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
+	if (roc_errata_nix_no_meta_aura())
+		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
 	if (roc_nix_inl_inb_is_enabled(nix))
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (3 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
                   ` (19 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Similar to other RQs, delay enabling the inline device RQ until the
device is started, to avoid traffic reception while the device is stopped.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.h    |  2 --
 drivers/common/cnxk/roc_nix_inl.c | 34 +++++++++++++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl.h |  5 ++++-
 drivers/common/cnxk/version.map   |  7 ++++---
 drivers/net/cnxk/cnxk_ethdev.c    | 14 +++++++++++++-
 5 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 7e0beed..16793c2 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,6 +17,4 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 
-uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
-
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 603551b..c621867 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -245,6 +245,9 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	struct roc_cpt *roc_cpt;
 	struct roc_cpt_rxc_time_cfg cfg;
 
+	if (!idev)
+		return -EFAULT;
+
 	PLT_SET_USED(max_frags);
 	if (idev == NULL)
 		return -ENOTSUP;
@@ -587,7 +590,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 }
 
 int
-roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
+roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
@@ -688,9 +691,9 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 
 	/* Prepare and send RQ init mbox */
 	if (roc_model_is_cn9k())
-		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	else
-		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	if (rc) {
 		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
 		return rc;
@@ -755,6 +758,31 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	return rc;
 }
 
+int
+roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	int rc;
+
+	if (!idev)
+		return -EFAULT;
+
+	if (nix->inb_inl_dev) {
+		if (!inl_rq || !idev->nix_inl_dev)
+			return -EFAULT;
+
+		inl_dev = idev->nix_inl_dev;
+
+		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 void
 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index c7b1817..702ec01 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -165,7 +165,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
-int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
+int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
 struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
@@ -175,6 +175,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 					   uint16_t max_frags);
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
+int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -189,6 +190,8 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 					void *args);
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
+uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
 	ROC_NIX_INL_SA_OP_FLUSH,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a2d99e1..6d43e37 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -90,7 +90,6 @@ INTERNAL {
 	roc_hash_sha512_gen;
 	roc_idev_cpt_get;
 	roc_idev_cpt_set;
-	roc_nix_inl_outb_ring_base_get;
 	roc_idev_lmt_base_addr_get;
 	roc_idev_npa_maxpools_get;
 	roc_idev_npa_maxpools_set;
@@ -137,11 +136,13 @@ INTERNAL {
 	roc_nix_get_vwqe_interval;
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
+	roc_nix_inl_ctx_write;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
 	roc_nix_inl_dev_is_probed;
 	roc_nix_inl_dev_lock;
+	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_dev_rq;
 	roc_nix_inl_dev_rq_get;
 	roc_nix_inl_dev_rq_put;
@@ -163,11 +164,11 @@ INTERNAL {
 	roc_nix_inl_outb_sa_base_get;
 	roc_nix_inl_outb_sso_pffunc_get;
 	roc_nix_inl_outb_is_enabled;
+	roc_nix_inl_outb_ring_base_get;
 	roc_nix_inl_outb_soft_exp_poll_switch;
+	roc_nix_inl_rq_ena_dis;
 	roc_nix_inl_sa_sync;
 	roc_nix_inl_ts_pkind_set;
-	roc_nix_inl_ctx_write;
-	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_outb_cpt_lfs_dump;
 	roc_nix_cpt_ctx_cache_sync;
 	roc_nix_is_lbk;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index df20f27..b3af2f8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -660,7 +660,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
 
 		/* Setup rq reference for inline dev if present */
-		rc = roc_nix_inl_dev_rq_get(rq);
+		rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
 		if (rc)
 			goto free_mem;
 	}
@@ -1482,6 +1482,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 
 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
 
+	/* Stop inline device RQ first */
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		roc_nix_inl_rq_ena_dis(&dev->nix, false);
+
 	/* Stop rx queues and free up pkts pending */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rc = dev_ops->rx_queue_stop(eth_dev, i);
@@ -1527,6 +1531,14 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
+		if (rc) {
+			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Start tx queues  */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (4 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 08/23] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
                   ` (18 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella, Ashwin Sekhar T K, Pavan Nikhilesh
  Cc: jerinj, dev

Reserve aura id 0 on cn10k and provide a mechanism to
specifically allocate and free it via the roc_npa_*
APIs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_dpi.c           |   2 +-
 drivers/common/cnxk/roc_nix_queue.c     |   2 +-
 drivers/common/cnxk/roc_npa.c           | 100 ++++++++++++++++++++++++++------
 drivers/common/cnxk/roc_npa.h           |   6 +-
 drivers/common/cnxk/roc_npa_priv.h      |   1 +
 drivers/common/cnxk/roc_sso.c           |   2 +-
 drivers/common/cnxk/version.map         |   1 +
 drivers/mempool/cnxk/cnxk_mempool_ops.c |   7 ++-
 8 files changed, 97 insertions(+), 24 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index 23b2cc4..93c8318 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -75,7 +75,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi)
 
 	memset(&aura, 0, sizeof(aura));
 	rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,
-				 DPI_CMD_QUEUE_BUFS, &aura, &pool);
+				 DPI_CMD_QUEUE_BUFS, &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create NPA pool, err %d\n", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 692b134..70b4516 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -713,7 +713,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
-				 &pool);
+				 &pool, 0);
 	if (rc)
 		goto fail;
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 1e60f44..760a231 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -261,15 +261,59 @@ bitmap_ctzll(uint64_t slab)
 }
 
 static int
+find_free_aura(struct npa_lf *lf, uint32_t flags)
+{
+	struct plt_bitmap *bmp = lf->npa_bmp;
+	uint64_t aura0_state = 0;
+	uint64_t slab;
+	uint32_t pos;
+	int idx = -1;
+	int rc;
+
+	if (flags & ROC_NPA_ZERO_AURA_F) {
+		/* Only look for zero aura */
+		if (plt_bitmap_get(bmp, 0))
+			return 0;
+		plt_err("Zero aura already in use");
+		return -1;
+	}
+
+	if (lf->zero_aura_rsvd) {
+		/* Save and clear zero aura bit if needed */
+		aura0_state = plt_bitmap_get(bmp, 0);
+		if (aura0_state)
+			plt_bitmap_clear(bmp, 0);
+	}
+
+	pos = 0;
+	slab = 0;
+	/* Scan from the beginning */
+	plt_bitmap_scan_init(bmp);
+	/* Scan bitmap to get the free pool */
+	rc = plt_bitmap_scan(bmp, &pos, &slab);
+	/* Empty bitmap */
+	if (rc == 0) {
+		plt_err("Aura's exhausted");
+		goto empty;
+	}
+
+	idx = pos + bitmap_ctzll(slab);
+empty:
+	if (lf->zero_aura_rsvd && aura0_state)
+		plt_bitmap_set(bmp, 0);
+
+	return idx;
+}
+
+static int
 npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 			 const uint32_t block_count, struct npa_aura_s *aura,
-			 struct npa_pool_s *pool, uint64_t *aura_handle)
+			 struct npa_pool_s *pool, uint64_t *aura_handle,
+			 uint32_t flags)
 {
 	int rc, aura_id, pool_id, stack_size, alloc_size;
 	char name[PLT_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
-	uint64_t slab;
-	uint32_t pos;
 
 	/* Sanity check */
 	if (!lf || !block_size || !block_count || !pool || !aura ||
@@ -281,20 +325,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	    block_size > ROC_NPA_MAX_BLOCK_SZ)
 		return NPA_ERR_INVALID_BLOCK_SZ;
 
-	pos = 0;
-	slab = 0;
-	/* Scan from the beginning */
-	plt_bitmap_scan_init(lf->npa_bmp);
-	/* Scan bitmap to get the free pool */
-	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
-	/* Empty bitmap */
-	if (rc == 0) {
-		plt_err("Mempools exhausted");
-		return NPA_ERR_AURA_ID_ALLOC;
-	}
-
 	/* Get aura_id from resource bitmap */
-	aura_id = pos + bitmap_ctzll(slab);
+	aura_id = find_free_aura(lf, flags);
+	if (aura_id < 0)
+		return NPA_ERR_AURA_ID_ALLOC;
+
 	/* Mark pool as reserved */
 	plt_bitmap_clear(lf->npa_bmp, aura_id);
 
@@ -374,7 +409,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 int
 roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		    uint32_t block_count, struct npa_aura_s *aura,
-		    struct npa_pool_s *pool)
+		    struct npa_pool_s *pool, uint32_t flags)
 {
 	struct npa_aura_s defaura;
 	struct npa_pool_s defpool;
@@ -394,6 +429,11 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		goto error;
 	}
 
+	if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+		rc = NPA_ERR_ALLOC;
+		goto error;
+	}
+
 	if (aura == NULL) {
 		memset(&defaura, 0, sizeof(struct npa_aura_s));
 		aura = &defaura;
@@ -406,7 +446,7 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 	}
 
 	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
-				      aura_handle);
+				      aura_handle, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
@@ -522,6 +562,26 @@ roc_npa_pool_range_update_check(uint64_t aura_handle)
 	return 0;
 }
 
+uint64_t
+roc_npa_zero_aura_handle(void)
+{
+	struct idev_cfg *idev;
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+	idev = idev_get_cfg();
+	if (idev == NULL)
+		return NPA_ERR_ALLOC;
+
+	/* Return aura handle only if reserved */
+	if (lf->zero_aura_rsvd)
+		return roc_npa_aura_handle_gen(0, lf->base);
+	return 0;
+}
+
 static inline int
 npa_attach(struct mbox *mbox)
 {
@@ -672,6 +732,10 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 	for (i = 0; i < nr_pools; i++)
 		plt_bitmap_set(lf->npa_bmp, i);
 
+	/* Reserve zero aura for all models other than CN9K */
+	if (!roc_model_is_cn9k())
+		lf->zero_aura_rsvd = true;
+
 	/* Allocate memory for qint context */
 	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
 	if (lf->npa_qint_mem == NULL) {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 59d13d8..69129cb 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -711,10 +711,13 @@ struct roc_npa {
 int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
 int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 
+/* Flags to pool create */
+#define ROC_NPA_ZERO_AURA_F BIT(0)
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
-				  struct npa_pool_s *pool);
+				  struct npa_pool_s *pool, uint32_t flags);
 int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
 					uint16_t aura_limit);
 int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
@@ -722,6 +725,7 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
+uint64_t __roc_api roc_npa_zero_aura_handle(void);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index 5a02a61..de3d544 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -32,6 +32,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
+	bool zero_aura_rsvd;
 };
 
 struct npa_qint {
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 126a9cb..4bee5a9 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -473,7 +473,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 	aura.fc_addr = (uint64_t)xaq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
-				 &aura, &pool);
+				 &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create XAQ pool");
 		goto npa_fail;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6d43e37..6c05e89 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -318,6 +318,7 @@ INTERNAL {
 	roc_npa_pool_destroy;
 	roc_npa_pool_op_pc_reset;
 	roc_npa_pool_range_update_check;
+	roc_npa_zero_aura_handle;
 	roc_npc_fini;
 	roc_npc_flow_create;
 	roc_npc_flow_destroy;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index c7b75f0..a0b94bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,10 +72,10 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 int
 cnxk_mempool_alloc(struct rte_mempool *mp)
 {
+	uint32_t block_count, flags = 0;
 	uint64_t aura_handle = 0;
 	struct npa_aura_s aura;
 	struct npa_pool_s pool;
-	uint32_t block_count;
 	size_t block_size;
 	int rc = -ERANGE;
 
@@ -100,8 +100,11 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
 	if (mp->pool_config != NULL)
 		memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
 
+	if (aura.ena && aura.pool_addr == 0)
+		flags = ROC_NPA_ZERO_AURA_F;
+
 	rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
-				 &pool);
+				 &pool, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 08/23] common/cnxk: add support to set NPA buf type
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (5 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 09/23] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
                   ` (17 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to set/get the per-aura buffer type with
reference counts, and to get the sum of all aura limits
matching a given buffer type mask and value.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/meson.build    |  1 +
 drivers/common/cnxk/roc_npa.c      | 11 +++++
 drivers/common/cnxk/roc_npa.h      | 22 +++++++++
 drivers/common/cnxk/roc_npa_priv.h |  8 ++-
 drivers/common/cnxk/roc_npa_type.c | 99 ++++++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map    |  3 ++
 6 files changed, 143 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/cnxk/roc_npa_type.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 6f80827..127fcbc 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -51,6 +51,7 @@ sources = files(
         'roc_npa.c',
         'roc_npa_debug.c',
         'roc_npa_irq.c',
+        'roc_npa_type.c',
         'roc_npc.c',
         'roc_npc_mcam.c',
         'roc_npc_mcam_dump.c',
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 760a231..ee42434 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -499,6 +499,7 @@ npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
 	pool_id = aura_id;
 	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
 	rc |= npa_stack_dma_free(lf, name, pool_id);
+	memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
 
 	plt_bitmap_set(lf->npa_bmp, aura_id);
 
@@ -750,6 +751,13 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 		goto qint_free;
 	}
 
+	/* Allocate per-aura attribute */
+	lf->aura_attr = plt_zmalloc(sizeof(struct npa_aura_attr) * nr_pools, 0);
+	if (lf->aura_attr == NULL) {
+		rc = NPA_ERR_PARAM;
+		goto lim_free;
+	}
+
 	/* Init aura start & end limits */
 	for (i = 0; i < nr_pools; i++) {
 		lf->aura_lim[i].ptr_start = UINT64_MAX;
@@ -758,6 +766,8 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 
 	return 0;
 
+lim_free:
+	plt_free(lf->aura_lim);
 qint_free:
 	plt_free(lf->npa_qint_mem);
 bmap_free:
@@ -780,6 +790,7 @@ npa_dev_fini(struct npa_lf *lf)
 	plt_free(lf->npa_qint_mem);
 	plt_bitmap_free(lf->npa_bmp);
 	plt_free(lf->npa_bmp_mem);
+	plt_free(lf->aura_attr);
 
 	return npa_lf_free(lf->mbox);
 }
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 69129cb..fed1942 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -714,6 +714,25 @@ int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 /* Flags to pool create */
 #define ROC_NPA_ZERO_AURA_F BIT(0)
 
+/* Enumerations */
+enum roc_npa_buf_type {
+	/* Aura used for normal pkts */
+	ROC_NPA_BUF_TYPE_PACKET = 0,
+	/* Aura used for ipsec pkts */
+	ROC_NPA_BUF_TYPE_PACKET_IPSEC,
+	/* Aura used as vwqe for normal pkts */
+	ROC_NPA_BUF_TYPE_VWQE,
+	/* Aura used as vwqe for ipsec pkts */
+	ROC_NPA_BUF_TYPE_VWQE_IPSEC,
+	/* Aura used as SQB for SQ */
+	ROC_NPA_BUF_TYPE_SQB,
+	/* Aura used for general buffer */
+	ROC_NPA_BUF_TYPE_BUF,
+	/* Aura used for timeout pool */
+	ROC_NPA_BUF_TYPE_TIMEOUT,
+	ROC_NPA_BUF_TYPE_END,
+};
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
@@ -726,6 +745,9 @@ void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
 uint64_t __roc_api roc_npa_zero_aura_handle(void);
+int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
+uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
+uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index de3d544..d2118cc 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -18,6 +18,7 @@ enum npa_error_status {
 
 struct npa_lf {
 	struct plt_intr_handle *intr_handle;
+	struct npa_aura_attr *aura_attr;
 	struct npa_aura_lim *aura_lim;
 	struct plt_pci_device *pci_dev;
 	struct plt_bitmap *npa_bmp;
@@ -25,6 +26,7 @@ struct npa_lf {
 	uint32_t stack_pg_ptrs;
 	uint32_t stack_pg_bytes;
 	uint16_t npa_msixoff;
+	bool zero_aura_rsvd;
 	void *npa_qint_mem;
 	void *npa_bmp_mem;
 	uint32_t nr_pools;
@@ -32,7 +34,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
-	bool zero_aura_rsvd;
+
 };
 
 struct npa_qint {
@@ -45,6 +47,10 @@ struct npa_aura_lim {
 	uint64_t ptr_end;
 };
 
+struct npa_aura_attr {
+	int buf_type[ROC_NPA_BUF_TYPE_END];
+};
+
 struct dev;
 
 static inline struct npa *
diff --git a/drivers/common/cnxk/roc_npa_type.c b/drivers/common/cnxk/roc_npa_type.c
new file mode 100644
index 0000000..ed90138
--- /dev/null
+++ b/drivers/common/cnxk/roc_npa_type.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+int
+roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int count)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools)
+		return NPA_ERR_PARAM;
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot set buf type on unused aura");
+		return NPA_ERR_PARAM;
+	}
+
+	if (type >= ROC_NPA_BUF_TYPE_END || (lf->aura_attr[aura_id].buf_type[type] + count < 0)) {
+		plt_err("Pool buf type invalid");
+		return NPA_ERR_PARAM;
+	}
+
+	lf->aura_attr[aura_id].buf_type[type] += count;
+	plt_wmb();
+	return 0;
+}
+
+uint64_t
+roc_npa_buf_type_mask(uint64_t aura_handle)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	uint64_t type_mask = 0;
+	struct npa_lf *lf;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools) {
+		plt_err("Invalid aura id or lf");
+		return 0;
+	}
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot get buf_type on unused aura");
+		return 0;
+	}
+
+	for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+		if (lf->aura_attr[aura_id].buf_type[type])
+			type_mask |= BIT_ULL(type);
+	}
+
+	return type_mask;
+}
+
+uint64_t
+roc_npa_buf_type_limit_get(uint64_t type_mask)
+{
+	uint64_t wdata, reg;
+	uint64_t limit = 0;
+	struct npa_lf *lf;
+	uint64_t aura_id;
+	int64_t *addr;
+	uint64_t val;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	for (aura_id = 0; aura_id < lf->nr_pools; aura_id++) {
+		if (plt_bitmap_get(lf->npa_bmp, aura_id))
+			continue;
+
+		/* Find aura's matching the buf_types requested */
+		if (type_mask != 0) {
+			val = 0;
+			for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+				if (lf->aura_attr[aura_id].buf_type[type] != 0)
+					val |= BIT_ULL(type);
+			}
+			if ((val & type_mask) == 0)
+				continue;
+		}
+
+		wdata = aura_id << 44;
+		addr = (int64_t *)(lf->base + NPA_LF_AURA_OP_LIMIT);
+		reg = roc_atomic64_add_nosync(wdata, addr);
+
+		if (!(reg & BIT_ULL(42)))
+			limit += (reg & ROC_AURA_OP_LIMIT_MASK);
+	}
+
+	return limit;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6c05e89..6f3de2a 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -306,6 +306,9 @@ INTERNAL {
 	roc_nix_vlan_mcam_entry_write;
 	roc_nix_vlan_strip_vtag_ena_dis;
 	roc_nix_vlan_tpid_set;
+	roc_npa_buf_type_mask;
+	roc_npa_buf_type_limit_get;
+	roc_npa_buf_type_update;
 	roc_npa_aura_drop_set;
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_set;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 09/23] common/cnxk: update attributes to pools used by NIX
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (6 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 08/23] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
                   ` (16 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Update attributes to pools used by NIX.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_queue.c | 112 +++++++++++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 70b4516..98b9fb4 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -140,6 +140,96 @@ roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
 	return sso_enable ? true : false;
 }
 
+static int
+nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
+{
+	struct roc_nix *roc_nix = rq->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
+	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
+	struct mbox *mbox = nix->dev.mbox;
+	uint64_t aura_base;
+	int rc, count;
+
+	count = set ? 1 : -1;
+	/* For buf type set, use info from RQ context */
+	if (set) {
+		lpb_aura = rq->aura_handle;
+		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
+		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
+		goto skip_ctx_read;
+	}
+
+	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_rsp *rsp;
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+	} else {
+		struct nix_cn10k_aq_enq_rsp *rsp;
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+		if (rsp->rq.vwqe_ena)
+			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);
+	}
+
+skip_ctx_read:
+	/* Update attributes for LPB aura */
+	if (inl_inb_ena)
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+	else
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+
+	/* Update attributes for SPB aura */
+	if (spb_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+		else
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+	}
+
+	/* Update attributes for VWQE aura */
+	if (vwqe_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
+		else
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
+	}
+
+	return 0;
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		bool cfg, bool ena)
@@ -292,7 +382,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
-			aq->rq.wqe_aura = rq->vwqe_aura_handle;
+			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
 		}
 	} else {
 		/* CQ mode */
@@ -463,6 +553,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura buf type to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -481,6 +574,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rq->qid >= nix->nb_rx_queues)
 		return NIX_ERR_QUEUE_INVALID_RANGE;
 
+	/* Clear attributes for existing aura's */
+	nix_rq_aura_buf_type_update(rq, false);
+
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
@@ -495,14 +591,25 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
+	int rc;
+
 	/* Disabling RQ is sufficient */
-	return roc_nix_rq_ena_dis(rq, false);
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc)
+		return rc;
+
+	/* Update aura attribute to indicate its use for */
+	nix_rq_aura_buf_type_update(rq, false);
+	return 0;
 }
 
 int
@@ -717,6 +824,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	if (rc)
 		goto fail;
 
+	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
 	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (7 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 09/23] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 11/23] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
                   ` (15 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to create a zero aura for inline inbound meta packets when
the platform supports it. Aura zero will hold as many buffers as all the
available packet pools combined, each with enough data room to accommodate
384B in the best case, for storing meta packets coming from Inline IPsec.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.c         |  10 ++
 drivers/common/cnxk/roc_idev.h         |   1 +
 drivers/common/cnxk/roc_idev_priv.h    |   9 ++
 drivers/common/cnxk/roc_nix.h          |   1 +
 drivers/common/cnxk/roc_nix_inl.c      | 211 +++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h      |   8 ++
 drivers/common/cnxk/roc_nix_inl_dev.c  |   2 +
 drivers/common/cnxk/roc_nix_inl_priv.h |   4 +
 drivers/common/cnxk/roc_nix_priv.h     |   1 +
 drivers/common/cnxk/roc_nix_queue.c    |  19 +++
 drivers/common/cnxk/version.map        |   4 +
 11 files changed, 270 insertions(+)

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index a08c7ce..4d2eff9 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -241,3 +241,13 @@ idev_sso_set(struct roc_sso *sso)
 	if (idev != NULL)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
+
+uint64_t
+roc_idev_nix_inl_meta_aura_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->inl_cfg.meta_aura;
+	return 0;
+}
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 16793c2..926aac0 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -16,5 +16,6 @@ struct roc_cpt *__roc_api roc_idev_cpt_get(void);
 void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
+uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 46eebff..315cc6f 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -10,6 +10,14 @@ struct npa_lf;
 struct roc_bphy;
 struct roc_cpt;
 struct nix_inl_dev;
+
+struct idev_nix_inl_cfg {
+	uint64_t meta_aura;
+	uint32_t nb_bufs;
+	uint32_t buf_sz;
+	uint32_t refs;
+};
+
 struct idev_cfg {
 	uint16_t sso_pf_func;
 	uint16_t npa_pf_func;
@@ -23,6 +31,7 @@ struct idev_cfg {
 	struct roc_cpt *cpt;
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
+	struct idev_nix_inl_cfg inl_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 };
 
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 3ad3a7e..5f5f5f9 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -315,6 +315,7 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
+	uint64_t meta_aura_handle;
 	uint16_t inl_dev_refs;
 };
 
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index c621867..507a153 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -6,6 +6,7 @@
 #include "roc_priv.h"
 
 uint32_t soft_exp_consumer_cnt;
+roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
@@ -19,6 +20,155 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
 
 static int
+nix_inl_meta_aura_destroy(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	/* Destroy existing Meta aura */
+	if (inl_cfg->meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
+		limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+		inl_cfg->meta_aura = 0;
+		inl_cfg->buf_sz = 0;
+		inl_cfg->nb_bufs = 0;
+		inl_cfg->refs = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	uint32_t nb_bufs, buf_sz;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Allocate meta aura */
+	rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	inl_cfg->buf_sz = buf_sz;
+	inl_cfg->nb_bufs = nb_bufs;
+	return 0;
+}
+
+int
+roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint32_t actual, expected;
+	uint64_t mask, type_mask;
+	int rc;
+
+	if (!idev || !meta_pool_cb)
+		return -EFAULT;
+	inl_cfg = &idev->inl_cfg;
+
+	/* Create meta aura if not present */
+	if (!inl_cfg->meta_aura) {
+		rc = nix_inl_meta_aura_create(idev, rq->first_skip);
+		if (rc)
+			return rc;
+	}
+
+	/* Validate if we have enough meta buffers */
+	mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	expected = roc_npa_buf_type_limit_get(mask);
+	actual = inl_cfg->nb_bufs;
+
+	if (actual < expected) {
+		plt_err("Insufficient buffers in meta aura %u < %u (expected)",
+			actual, expected);
+		return -EIO;
+	}
+
+	/* Validate if we have enough space for meta buffer */
+	if (rq->first_skip + NIX_INL_META_SIZE > inl_cfg->buf_sz) {
+		plt_err("Meta buffer size %u not sufficient to meet RQ first skip %u",
+			inl_cfg->buf_sz, rq->first_skip);
+		return -EIO;
+	}
+
+	/* Validate if we have enough VWQE buffers */
+	if (rq->vwqe_ena) {
+		actual = roc_npa_aura_op_limit_get(rq->vwqe_aura_handle);
+
+		type_mask = roc_npa_buf_type_mask(rq->vwqe_aura_handle);
+		if (type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE_IPSEC) &&
+		    type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE)) {
+			/* VWQE aura shared b/w Inline enabled and non Inline
+			 * enabled ports needs enough buffers to store all the
+			 * packet buffers, one per vwqe.
+			 */
+			mask = (BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC) |
+				BIT_ULL(ROC_NPA_BUF_TYPE_PACKET));
+			expected = roc_npa_buf_type_limit_get(mask);
+
+			if (actual < expected) {
+				plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
+					"ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		} else {
+			/* VWQE aura not shared b/w Inline and non Inline ports have relaxed
+			 * requirement of match all the meta buffers.
+			 */
+			expected = inl_cfg->nb_bufs;
+
+			if (actual < expected) {
+				plt_err("VWQE aura not shared b/w Inline inbound and non-Inline "
+					"ports needs vwqe bufs(%u) minimum of all meta bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int
 nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 {
 	uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
@@ -310,6 +460,10 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		nix->need_meta_aura = true;
+		idev->inl_cfg.refs++;
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -317,12 +471,22 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 int
 roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
+	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 
 	if (!nix->inl_inb_ena)
 		return 0;
 
+	if (!idev)
+		return -EFAULT;
+
 	nix->inl_inb_ena = false;
+	if (nix->need_meta_aura) {
+		nix->need_meta_aura = false;
+		idev->inl_cfg.refs--;
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy();
+	}
 
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
@@ -592,6 +756,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 int
 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
+	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
@@ -603,6 +768,10 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	if (idev == NULL)
 		return 0;
 
+	/* Update meta aura handle in RQ */
+	if (nix->need_meta_aura)
+		rq->meta_aura_handle = roc_npa_zero_aura_handle();
+
 	inl_dev = idev->nix_inl_dev;
 	/* Nothing to do if no inline device */
 	if (!inl_dev)
@@ -705,6 +874,13 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 		return rc;
 	}
 
+	/* Check meta aura */
+	if (enable && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	inl_rq->inl_dev_refs++;
 	rq->inl_dev_refs = 1;
 	return 0;
@@ -724,6 +900,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	if (idev == NULL)
 		return 0;
 
+	rq->meta_aura_handle = 0;
 	if (!rq->inl_dev_refs)
 		return 0;
 
@@ -779,6 +956,9 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
 		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
 		if (rc)
 			return rc;
+
+		if (enable && nix->need_meta_aura)
+			return roc_nix_inl_meta_aura_check(inl_rq);
 	}
 	return 0;
 }
@@ -792,6 +972,31 @@ roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 	nix->inb_inl_dev = use_inl_dev;
 }
 
+void
+roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (!idev)
+		return;
+	/* Need to set here for cases when inbound SA table is
+	 * managed outside RoC.
+	 */
+	nix->inl_inb_ena = ena;
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		if (ena) {
+			nix->need_meta_aura = true;
+			idev->inl_cfg.refs++;
+		} else if (nix->need_meta_aura) {
+			nix->need_meta_aura = false;
+			idev->inl_cfg.refs--;
+			if (!idev->inl_cfg.refs)
+				nix_inl_meta_aura_destroy();
+		}
+	}
+}
+
 int
 roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
 {
@@ -1128,3 +1333,9 @@ roc_nix_inl_dev_unlock(void)
 	if (idev != NULL)
 		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
 }
+
+void
+roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
+{
+	meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 702ec01..9911a48 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
 
+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
+					  bool destroy);
+
 struct roc_nix_inl_dev {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -135,6 +138,8 @@ struct roc_nix_inl_dev {
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
 	bool set_soft_exp_poll;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 	/* End of input parameters */
 
 #define ROC_NIX_INL_MEM_SZ (1280)
@@ -165,6 +170,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
+void __roc_api roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena);
 int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
@@ -176,6 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
 int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -191,6 +198,7 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 3a96498..1e9b2b9 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -841,6 +841,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
 	inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
 	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
+	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
+	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
 
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index a775efc..ccd2adf 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,6 +6,8 @@
 #include <pthread.h>
 #include <sys/types.h>
 
+#define NIX_INL_META_SIZE 384u
+
 struct nix_inl_dev;
 struct nix_inl_qint {
 	struct nix_inl_dev *inl_dev;
@@ -86,6 +88,8 @@ struct nix_inl_dev {
 	bool attach_cptlf;
 	uint16_t wqe_skip;
 	bool ts_ena;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 };
 
 int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a3d4ddf..a253f41 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -202,6 +202,7 @@ struct nix {
 	uint16_t nb_cpt_lf;
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
+	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
 
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 98b9fb4..b197de0 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -89,7 +89,12 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 
 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
 	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
+	if (rc)
+		return rc;
 
+	/* Check for meta aura if RQ is enabled */
+	if (enable && nix->need_meta_aura)
+		rc = roc_nix_inl_meta_aura_check(rq);
 	return rc;
 }
 
@@ -556,6 +561,13 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura buf type to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -594,6 +606,13 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura attribute to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6f3de2a..276fec3 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -95,6 +95,7 @@ INTERNAL {
 	roc_idev_npa_maxpools_set;
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
+	roc_idev_nix_inl_meta_aura_get;
 	roc_model;
 	roc_se_auth_key_set;
 	roc_se_ciph_key_set;
@@ -156,7 +157,10 @@ INTERNAL {
 	roc_nix_inl_inb_sa_sz;
 	roc_nix_inl_inb_tag_update;
 	roc_nix_inl_inb_fini;
+	roc_nix_inl_inb_set;
 	roc_nix_inb_is_with_inl_dev;
+	roc_nix_inl_meta_aura_check;
+	roc_nix_inl_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 11/23] net/cnxk: support for zero aura for inline meta
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (8 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
                   ` (14 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Add support for zero aura for inline meta pkts and register
callback to ROC to create meta pool via mempool. Also
add devargs to override meta buffer count and size.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c      |  8 ++-
 drivers/event/cnxk/cn10k_worker.h        | 32 ++++++-----
 drivers/event/cnxk/cnxk_eventdev.h       |  1 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c          |  8 ++-
 drivers/net/cnxk/cn10k_ethdev.h          |  2 +-
 drivers/net/cnxk/cn10k_rx.h              | 35 +++++++-----
 drivers/net/cnxk/cnxk_ethdev.c           |  3 +
 drivers/net/cnxk/cnxk_ethdev.h           |  2 +
 drivers/net/cnxk/cnxk_ethdev_sec.c       | 97 +++++++++++++++++++++++++++++++-
 10 files changed, 154 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index db61606..0651b2d 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,7 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,6 +703,8 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
 		ws->tstamp = dev->tstamp;
+		if (meta_aura)
+			ws->meta_aura = meta_aura;
 	}
 }
 
@@ -713,6 +715,7 @@ cn10k_sso_rx_adapter_queue_add(
 	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct cn10k_eth_rxq *rxq;
+	uint64_t meta_aura;
 	void *lookup_mem;
 	int rc;
 
@@ -726,7 +729,8 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+	meta_aura = rxq->meta_aura;
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index db56d96..47ce423 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -127,12 +127,14 @@ cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
 }
 
 static __rte_always_inline void
-cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
-		   void *lookup_mem, void *tstamp, uintptr_t lbase)
+cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn10k_sso_hws *ws)
 {
 	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+	struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+	void *lookup_mem = ws->lookup_mem;
+	uintptr_t lbase = ws->lmt_base;
 	struct rte_event_vector *vec;
-	uint64_t aura_handle, laddr;
+	uint64_t meta_aura, laddr;
 	uint16_t nb_mbufs, non_vec;
 	uint16_t lmt_id, d_off;
 	struct rte_mbuf **wqe;
@@ -153,25 +155,31 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
 		mbuf_init |= 8;
 
+	meta_aura = ws->meta_aura;
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
-					      flags | NIX_RX_VWQE_F, lookup_mem,
-					      tstamp, lbase);
+					      flags | NIX_RX_VWQE_F,
+					      lookup_mem, tstamp,
+					      lbase, meta_aura);
 	wqe += nb_mbufs;
 	non_vec = vec->nb_elem - nb_mbufs;
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+		uint64_t sg_w1;
+
 		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
 					   sizeof(struct rte_mbuf));
 		/* Pick first mbuf's aura handle assuming all
 		 * mbufs are from a vec and are from same RQ.
 		 */
-		aura_handle = mbuf->pool->pool_id;
+		meta_aura = ws->meta_aura;
+		if (!meta_aura)
+			meta_aura = mbuf->pool->pool_id;
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 		laddr = lbase;
 		laddr += 8;
-		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
-		d_off += (mbuf_init & 0xFFFF);
+		sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+		d_off = sg_w1 - (uintptr_t)mbuf;
 		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	}
@@ -208,7 +216,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	/* Free remaining meta buffers if any */
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
-		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+		nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
 		plt_io_wmb();
 	}
 }
@@ -241,8 +249,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			uint64_t cq_w5;
 
 			m = (struct rte_mbuf *)mbuf;
-			d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-			d_off += RTE_PKTMBUF_HEADROOM;
+			d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
 
 			cq_w1 = *(uint64_t *)(u64[1] + 8);
 			cq_w5 = *(uint64_t *)(u64[1] + 40);
@@ -273,8 +280,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
-		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp[port], ws->lmt_base);
+		cn10k_process_vwqe(u64[1], port, flags, ws);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index fae4484..d61e60d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -148,6 +148,7 @@ struct cn10k_sso_hws {
 	uint8_t hws_id;
 	/* PTP timestamp */
 	struct cnxk_timesync_info **tstamp;
+	uint64_t meta_aura;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 7937cad..5f51c50 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -194,7 +194,7 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 
 	rq->vwqe_ena = 1;
 	rq->vwqe_first_skip = 0;
-	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+	rq->vwqe_aura_handle = vmp->pool_id;
 	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
 	rq->vwqe_wait_tmo =
 		tmo_ns /
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 80c5c0e..e8faeeb 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -282,9 +282,13 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rxq->lmt_base = dev->nix.lmt_base;
 		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
 							   dev->inb.inl_dev);
+		rxq->meta_aura = rq->meta_aura_handle;
+		rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+		/* Assume meta packet from normal aura if meta aura is not setup
+		 */
+		if (!rxq->meta_aura)
+			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
 	}
-	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
-	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
 
 	/* Lookup mem */
 	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index acfdbb6..d0a5b13 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -39,7 +39,7 @@ struct cn10k_eth_rxq {
 	uint16_t data_off;
 	uint64_t sa_base;
 	uint64_t lmt_base;
-	uint64_t aura_handle;
+	uint64_t meta_aura;
 	uint16_t rq;
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 0f8790b..2cd297e 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -877,7 +877,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		aura_handle = rxq->aura_handle;
+		aura_handle = rxq->meta_aura;
 		sa_base = rxq->sa_base;
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -984,7 +984,7 @@ static __rte_always_inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	struct cn10k_eth_rxq *rxq = args;
 	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -1003,10 +1003,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint64_t aura_handle, lbase, laddr;
 	uint8_t loff = 0, lnum = 0, shft = 0;
 	uint8x16_t f0, f1, f2, f3;
 	uint16_t lmt_id, d_off;
+	uint64_t lbase, laddr;
 	uint16_t packets = 0;
 	uint16_t pkts_left;
 	uintptr_t sa_base;
@@ -1035,6 +1035,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
 		if (flags & NIX_RX_VWQE_F) {
+			uint64_t sg_w1;
 			uint16_t port;
 
 			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
@@ -1042,10 +1043,15 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Pick first mbuf's aura handle assuming all
 			 * mbufs are from a vec and are from same RQ.
 			 */
-			aura_handle = mbuf0->pool->pool_id;
+			if (!meta_aura)
+				meta_aura = mbuf0->pool->pool_id;
 			/* Calculate offset from mbuf to actual data area */
-			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
-			d_off += (mbuf_initializer & 0xFFFF);
+			/* Zero aura's first skip i.e mbuf setup might not match the actual
+			 * offset as first skip is taken from second pass RQ. So compute
+			 * using diff b/w first SG pointer and mbuf addr.
+			 */
+			sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+			d_off = (sg_w1 - (uint64_t)mbuf0);
 
 			/* Get SA Base from lookup tbl using port_id */
 			port = mbuf_initializer >> 48;
@@ -1053,7 +1059,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 			lbase = lmt_base;
 		} else {
-			aura_handle = rxq->aura_handle;
+			meta_aura = rxq->meta_aura;
 			d_off = rxq->data_off;
 			sa_base = rxq->sa_base;
 			lbase = rxq->lmt_base;
@@ -1721,7 +1727,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(15 & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 				loff = loff - 15;
 				shft += 3;
 
@@ -1744,14 +1750,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(loff & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 
 				data = (data & ~(0x7UL << shft)) |
 				       (((uint64_t)loff >> 1) << shft);
 
 				/* Send up to 16 lmt lines of pointers */
 				nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
-							 aura_handle);
+							 meta_aura);
 				rte_io_wmb();
 				lnum = 0;
 				loff = 0;
@@ -1769,13 +1775,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		/* Update aura handle */
 		*(uint64_t *)(laddr - 8) =
 			(((uint64_t)(loff & 0x1) << 32) |
-			 roc_npa_aura_handle_to_aura(aura_handle));
+			 roc_npa_aura_handle_to_aura(meta_aura));
 
 		data = (data & ~(0x7UL << shft)) |
 		       (((uint64_t)loff >> 1) << shft);
 
 		/* Send up to 16 lmt lines of pointers */
-		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
+		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
 		if (flags & NIX_RX_VWQE_F)
 			plt_io_wmb();
 	}
@@ -1803,7 +1809,7 @@ static inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	RTE_SET_USED(args);
 	RTE_SET_USED(mbufs);
@@ -1812,6 +1818,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	RTE_SET_USED(lookup_mem);
 	RTE_SET_USED(tstamp);
 	RTE_SET_USED(lmt_base);
+	RTE_SET_USED(meta_aura);
 
 	return 0;
 }
@@ -2038,7 +2045,7 @@ NIX_RX_FASTPATH_MODES
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
-						  (flags), NULL, NULL, 0);     \
+						  (flags), NULL, NULL, 0, 0);  \
 	}
 
 #define NIX_RX_RECV_VEC_MSEG(fn, flags)                                        \
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index b3af2f8..02416ad 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1732,6 +1732,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	roc_nix_mac_link_info_get_cb_register(nix,
 					      cnxk_eth_dev_link_status_get_cb);
 
+	/* Register callback for inline meta pool create */
+	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 4cb7c9e..be5cecd 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -641,6 +641,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
 struct cnxk_eth_sec_sess *
 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+			      bool destroy);
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 1de3454..9304b14 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -4,10 +4,14 @@
 
 #include <cnxk_ethdev.h>
 
+#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
+#define CNXK_NIX_INL_NB_META_BUFS     "nb_meta_bufs"
+#define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 
 struct inl_cpt_channel {
 	bool is_multi_channel;
@@ -29,6 +33,85 @@ bitmap_ctzll(uint64_t slab)
 }
 
 int
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+{
+	const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+	struct rte_pktmbuf_pool_private mbp_priv;
+	struct npa_aura_s *aura;
+	struct rte_mempool *mp;
+	uint16_t first_skip;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		mp = rte_mempool_lookup(mp_name);
+		if (!mp)
+			return -ENOENT;
+
+		if (mp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		plt_free(mp->pool_config);
+		rte_mempool_free(mp);
+
+		*aura_handle = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	mp = rte_mempool_create_empty(mp_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!mp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	/* Indicate to allocate zero aura */
+	aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
+	if (!aura) {
+		rc = -ENOMEM;
+		goto free_mp;
+	}
+	aura->ena = 1;
+	aura->pool_addr = 0x0;
+
+	rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+					aura);
+	if (rc) {
+		plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
+		goto free_aura;
+	}
+
+	/* Init mempool private area */
+	first_skip = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+	memset(&mbp_priv, 0, sizeof(mbp_priv));
+	mbp_priv.mbuf_data_room_size = (buf_sz - first_skip +
+					RTE_PKTMBUF_HEADROOM);
+	rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(mp);
+	if (rc < 0) {
+		plt_err("Failed to create inline meta pool, rc=%d", rc);
+		goto free_aura;
+	}
+
+	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+	*aura_handle = mp->pool_id;
+	return 0;
+free_aura:
+	plt_free(aura);
+free_mp:
+	rte_mempool_free(mp);
+	return rc;
+}
+
+int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
 {
@@ -128,7 +211,7 @@ struct rte_security_ops cnxk_eth_sec_ops = {
 };
 
 static int
-parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
+parse_val_u32(const char *key, const char *value, void *extra_args)
 {
 	RTE_SET_USED(key);
 	uint32_t val;
@@ -184,6 +267,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	uint32_t ipsec_in_min_spi = 0;
 	struct inl_cpt_channel cpt_channel;
 	struct rte_kvargs *kvlist;
+	uint32_t nb_meta_bufs = 0;
+	uint32_t meta_buf_sz = 0;
 	uint8_t selftest = 0;
 
 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -198,11 +283,15 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
 			   &selftest);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
+			   &parse_val_u32, &ipsec_in_min_spi);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
+			   &parse_val_u32, &ipsec_in_max_spi);
 	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
 			   &cpt_channel);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_NB_META_BUFS, &parse_val_u32,
+			   &nb_meta_bufs);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_META_BUF_SZ, &parse_val_u32,
+			   &meta_buf_sz);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -212,6 +301,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->channel = cpt_channel.channel;
 	inl_dev->chan_mask = cpt_channel.mask;
 	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
+	inl_dev->nb_meta_bufs = nb_meta_bufs;
+	inl_dev->meta_buf_sz = meta_buf_sz;
 	return 0;
 exit:
 	return -EINVAL;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (9 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 11/23] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 13/23] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
                   ` (13 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Replace the use of platform specific APIs with platform independent
APIs.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c    | 8 ++++----
 drivers/common/cnxk/roc_cpt.h    | 2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index d607bde..6f0ee44 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -998,7 +998,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1019,9 +1019,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
 	inst.w4.s.dlen = ctx_len;
-	inst.dptr = rte_mempool_virt2iova(sa);
+	inst.dptr = sa;
 	inst.rptr = 0;
-	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+	inst.w7.s.cptr = sa;
 	inst.w7.s.egrp = egrp;
 
 	inst.w0.u64 = 0;
@@ -1029,7 +1029,7 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w3.u64 = 0;
 	inst.res_addr = (uintptr_t)hw_res;
 
-	rte_io_wmb();
+	plt_io_wmb();
 
 	do {
 		/* Copy CPT command to LMTLINE */
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 4e3a078..6953f2b 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -173,7 +173,7 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa,
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
 				   uint8_t opcode, uint16_t ctx_len,
 				   uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 6d26b0c..78c181b 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -82,8 +82,8 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->out_sa, opcode,
-				   ctx_len, egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
+				   opcode, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -174,8 +174,8 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->in_sa, opcode, ctx_len,
-				   egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
+				   opcode, ctx_len, egrp);
 	if (ret)
 		return ret;
 
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 13/23] net/cnxk: use full context IPsec structures in fp
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (10 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
                   ` (12 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Pavan Nikhilesh,
	Shijith Thotton
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Use the Full context SA structures and command in IPsec fast path.
For inline outbound, populate CPT instruction as per Full context.
Add new macros and functions with respect to Full context.
Populate wqe ptr in CPT instruction with proper offset from mbuf.
Add option to override the outbound inline SA IV for debug
purposes via an environment variable. The user can set the env variable as:
export ETH_SEC_IV_OVR="0x0, 0x0,..."
Update mbuf len based on IP version in Rx post process.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c          |   8 +-
 drivers/common/cnxk/roc_cpt.c                |   9 ++-
 drivers/common/cnxk/roc_cpt.h                |   8 +-
 drivers/common/cnxk/roc_ie_on.h              |   6 ++
 drivers/common/cnxk/roc_nix_inl.c            |  33 +++++---
 drivers/common/cnxk/roc_nix_inl.h            |  46 +++++++++++
 drivers/common/cnxk/roc_nix_inl_dev.c        |   2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c             |   8 +-
 drivers/event/cnxk/cn9k_worker.h             |  48 +++++++-----
 drivers/net/cnxk/cn9k_ethdev.h               |   3 +
 drivers/net/cnxk/cn9k_ethdev_sec.c           | 111 ++++++++++++++++++++++-----
 drivers/net/cnxk/cn9k_rx.h                   |  43 +++++++----
 drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c |  32 +++-----
 13 files changed, 255 insertions(+), 102 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index dca8742..89ac900 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1242,7 +1242,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 			ctx_len += sizeof(template->ip4);
 
 			ip4->version_ihl = RTE_IPV4_VHL_DEF;
-			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
+			ip4->time_to_live = ipsec->tunnel.ipv4.ttl ?
+						    ipsec->tunnel.ipv4.ttl :
+						    0x40;
 			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
 			if (ipsec->tunnel.ipv4.df)
 				frag_off |= RTE_IPV4_HDR_DF_FLAG;
@@ -1275,7 +1277,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 						 ((ipsec->tunnel.ipv6.flabel
 						   << RTE_IPV6_HDR_FL_SHIFT) &
 						  RTE_IPV6_HDR_FL_MASK));
-			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
+			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit ?
+						  ipsec->tunnel.ipv6.hlimit :
+						  0x40;
 			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
 			       sizeof(struct in6_addr));
 			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 6f0ee44..8fc072b 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -277,7 +277,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 
 int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2)
+			     uint16_t param2, uint16_t opcode)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -292,6 +292,7 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	req->sso_pf_func = idev_sso_pffunc_get();
 	req->param1 = param1;
 	req->param2 = param2;
+	req->opcode = opcode;
 
 	return mbox_process(mbox);
 }
@@ -998,7 +999,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1014,7 +1015,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 
 	hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
 
-	inst.w4.s.opcode_major = opcode;
+	inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
+	if (inb)
+		inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	inst.w4.s.opcode_minor = ctx_len >> 3;
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 6953f2b..9a79998 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -161,7 +161,8 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
 	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2);
+					   uint16_t param1, uint16_t param2,
+					   uint16_t opcode);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
@@ -173,7 +174,6 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
-				   uint8_t opcode, uint16_t ctx_len,
-				   uint8_t egrp);
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
+				   uint16_t ctx_len, uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 2d93cb6..961d5fc 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -13,6 +13,12 @@
 #define ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC 0x23
 #define ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC  0x24
 
+#define ROC_IE_ON_INB_MAX_CTX_LEN	       34UL
+#define ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT  (1 << 12)
+#define ROC_IE_ON_OUTB_MAX_CTX_LEN	       31UL
+#define ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT (1 << 9)
+#define ROC_IE_ON_OUTB_PER_PKT_IV	       (1 << 11)
+
 /* Ucode completion codes */
 enum roc_ie_on_ucc_ipsec {
 	ROC_IE_ON_UCC_SUCCESS = 0,
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 507a153..be0b806 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -8,11 +8,11 @@
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ == 1024);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
@@ -184,7 +184,7 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
@@ -422,7 +422,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct roc_cpt *roc_cpt;
+	uint16_t opcode;
 	uint16_t param1;
+	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -439,17 +441,23 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
+		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		opcode =
+			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
+			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
 		union roc_ot_ipsec_inb_param1 u;
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
 		param1 = u.u16;
+		param2 = 0;
+		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
@@ -605,7 +613,7 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
+		sa_sz = ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ;
 	else
 		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
 	/* Alloc contiguous memory of outbound SA */
@@ -1212,7 +1220,12 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
 
 	/* Nothing much to do on cn9k */
 	if (roc_model_is_cn9k()) {
-		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+		nix = roc_nix_to_nix_priv(roc_nix);
+		outb_lf = nix->cpt_lf_base;
+		rc = roc_on_cpt_ctx_write(outb_lf, (uint64_t)sa_dptr, inb,
+					  sa_len, ROC_CPT_DFLT_ENG_GRP_SE_IE);
+		if (rc)
+			return rc;
 		return 0;
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 9911a48..555cb28 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -22,6 +22,24 @@
 	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
 #define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
 
+/* ON INB HW area */
+#define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
+	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
+/* ON INB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD 640
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ                                         \
+	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
+
+/* ON OUTB HW area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
+	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
+/* ON OUTB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
+	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2 9
+
 /* OT INB HW area */
 #define ROC_NIX_INL_OT_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ot_ipsec_inb_sa), ROC_ALIGN)
@@ -61,6 +79,34 @@
 #define ROC_NIX_INL_REAS_ZOMBIE_LIMIT	  0xFFF
 #define ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD 10
 
+static inline struct roc_ie_on_inb_sa *
+roc_nix_inl_on_ipsec_inb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline struct roc_ie_on_outb_sa *
+roc_nix_inl_on_ipsec_outb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_INB_HW_SZ);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
+}
+
 static inline struct roc_onf_ipsec_inb_sa *
 roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 1e9b2b9..4fe7b51 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -394,7 +394,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 
 	/* CN9K SA is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 78c181b..8491558 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -29,7 +29,6 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret;
 
@@ -80,10 +79,9 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 		return ret;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
-				   opcode, ctx_len, egrp);
+				   false, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -133,7 +131,6 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len = 0;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret = 0;
 
@@ -172,10 +169,9 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 		sa->esn_en = 1;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
-				   opcode, ctx_len, egrp);
+				   true, ctx_len, egrp);
 	if (ret)
 		return ret;
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index b087255..881861f 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -626,12 +626,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t sa_base = txq->sa_base;
 	uint32_t pkt_len, dlen_adj, rlen;
+	struct roc_ie_on_outb_hdr *hdr;
 	uint64x2_t cmd01, cmd23;
 	uint64_t lmt_status, sa;
 	union nix_send_sg_s *sg;
+	uint32_t esn_lo, esn_hi;
 	uintptr_t dptr, nixtx;
 	uint64_t ucode_cmd[4];
-	uint64_t esn, *iv;
+	uint64_t esn;
 	uint8_t l2_len;
 
 	mdata.u64 = *rte_security_dynfield(m);
@@ -670,14 +672,19 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 
 	/* Load opcode and cptr already prepared at pkt metadata set */
 	pkt_len -= l2_len;
-	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
-		    ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
+	pkt_len += (sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN) +
+		   ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 
-	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
+	sa = (uintptr_t)roc_nix_inl_on_ipsec_outb_sa(sa_base, mdata.sa_idx);
 	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
-	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
-			0x40UL << 48 | pkt_len);
+	ucode_cmd[0] = (((ROC_IE_ON_OUTB_MAX_CTX_LEN << 8) |
+			 ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC)
+				<< 48 |
+			(ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT |
+			 (ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ >>
+			  3)) << 32 |
+			pkt_len);
 
 	/* CPT Word 0 and Word 1 */
 	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
@@ -687,35 +694,40 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	/* CPT word 2 and 3 */
 	cmd23 = vdupq_n_u64(0);
 	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
-				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
-	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20),
+			       cmd23, 0);
+	cmd23 = vsetq_lane_u64(((uintptr_t)m + sizeof(struct rte_mbuf)) | 1,
+			       cmd23, 1);
 
 	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
-		sizeof(struct roc_onf_ipsec_outb_hdr);
+		(sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN);
 	ucode_cmd[1] = dptr;
 	ucode_cmd[2] = dptr;
 
-	/* Update IV to zero and l2 sz */
-	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
+	/* Update l2 sz */
+	*(uint16_t *)(dptr + (sizeof(struct roc_ie_on_outb_hdr) -
+			      ROC_IE_ON_MAX_IV_LEN)) =
 		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
-	iv = (uint64_t *)(dptr + 8);
-	iv[0] = 0;
-	iv[1] = 0;
 
 	/* Head wait if needed */
 	if (base)
 		roc_sso_hws_head_wait(base);
 
 	/* ESN */
-	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
+	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd((void *)sa);
 	esn = outb_priv->esn;
 	outb_priv->esn = esn + 1;
 
 	ucode_cmd[0] |= (esn >> 32) << 16;
-	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_lo = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_hi = rte_cpu_to_be_32(esn >> 32);
 
-	/* Update ESN and IPID and IV */
-	*(uint64_t *)dptr = esn << 32 | esn;
+	/* Update ESN, IPID and IV */
+	hdr = (struct roc_ie_on_outb_hdr *)dptr;
+	hdr->ip_id = esn_lo;
+	hdr->seq = esn_lo;
+	hdr->esn = esn_hi;
+	hdr->df_tos = 0;
 
 	rte_io_wmb();
 	cn9k_sso_txq_fc_wait(txq);
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 449729f..472a4b0 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -79,6 +79,9 @@ struct cn9k_outb_priv_data {
 
 	/* Back pointer to eth sec session */
 	struct cnxk_eth_sec_sess *eth_sec;
+
+	/* IV in DBG mode */
+	uint8_t iv_dbg[ROC_IE_ON_MAX_IV_LEN];
 };
 
 struct cn9k_sec_sess_priv {
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 4dd0b61..88b95fb 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -134,6 +134,37 @@ ar_window_init(struct cn9k_inb_priv_data *inb_priv)
 	return 0;
 }
 
+static void
+outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
+{
+	uint8_t *iv_dbg = common_sa->iv.aes_iv;
+	char *iv_str = strdup(__iv_str);
+	char *iv_b = NULL;
+	char *save;
+	int i, iv_len = ROC_IE_ON_MAX_IV_LEN;
+
+	if (!iv_str)
+		return;
+
+	if (common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
+	    common_sa->ctl.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
+		iv_dbg = common_sa->iv.gcm.iv;
+		iv_len = 8;
+	}
+
+	memset(iv_dbg, 0, iv_len);
+	for (i = 0; i < iv_len; i++) {
+		iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+		if (!iv_b)
+			break;
+		iv_dbg[i] = strtoul(iv_b, NULL, 0);
+	}
+
+	free(iv_str);
+}
+
 static int
 cn9k_eth_sec_session_create(void *device,
 			    struct rte_security_session_conf *conf,
@@ -150,6 +181,7 @@ cn9k_eth_sec_session_create(void *device,
 	rte_spinlock_t *lock;
 	char tbuf[128] = {0};
 	bool inbound;
+	int ctx_len;
 	int rc = 0;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
@@ -183,21 +215,26 @@ cn9k_eth_sec_session_create(void *device,
 	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
 	sess_priv.u64 = 0;
 
+	if (!dev->outb.lf_base) {
+		plt_err("Could not allocate security session private data");
+		return -ENOMEM;
+	}
+
 	if (inbound) {
 		struct cn9k_inb_priv_data *inb_priv;
-		struct roc_onf_ipsec_inb_sa *inb_sa;
+		struct roc_ie_on_inb_sa *inb_sa;
 		uint32_t spi_mask;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD);
 
 		spi_mask = roc_nix_inl_inb_spi_range(nix, false, NULL, NULL);
 
 		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
 		 * device always for CN9K.
 		 */
-		inb_sa = (struct roc_onf_ipsec_inb_sa *)
-			 roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
+		inb_sa = (struct roc_ie_on_inb_sa *)roc_nix_inl_inb_sa_get(
+			nix, false, ipsec->spi);
 		if (!inb_sa) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to create ingress sa");
@@ -206,7 +243,7 @@ cn9k_eth_sec_session_create(void *device,
 		}
 
 		/* Check if SA is already in use */
-		if (inb_sa->ctl.valid) {
+		if (inb_sa->common_sa.ctl.valid) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Inbound SA with SPI %u already in use",
 				 ipsec->spi);
@@ -214,17 +251,26 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
+		memset(inb_sa, 0, sizeof(struct roc_ie_on_inb_sa));
 
 		/* Fill inbound sa params */
-		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
-		if (rc) {
+		rc = cnxk_on_ipsec_inb_sa_create(ipsec, crypto, inb_sa);
+		if (rc < 0) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init inbound sa, rc=%d", rc);
 			goto mempool_put;
 		}
 
-		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa, inb_sa, inbound,
+					   ctx_len);
+		if (rc) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to create inbound sa, rc=%d", rc);
+			goto mempool_put;
+		}
+
+		inb_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
 
@@ -253,27 +299,38 @@ cn9k_eth_sec_session_create(void *device,
 		dev->inb.nb_sess++;
 	} else {
 		struct cn9k_outb_priv_data *outb_priv;
-		struct roc_onf_ipsec_outb_sa *outb_sa;
 		uintptr_t sa_base = dev->outb.sa_base;
 		struct cnxk_ipsec_outb_rlens *rlens;
+		struct roc_ie_on_outb_sa *outb_sa;
+		const char *iv_str;
 		uint32_t sa_idx;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD);
 
 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
 		if (rc)
 			goto mempool_put;
 
-		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
-		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
+		outb_sa = roc_nix_inl_on_ipsec_outb_sa(sa_base, sa_idx);
+		outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
 		rlens = &outb_priv->rlens;
 
-		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
+		memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));
 
 		/* Fill outbound sa params */
-		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
+		rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
+		if (rc < 0) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to init outbound sa, rc=%d", rc);
+			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+			goto mempool_put;
+		}
+
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, inbound,
+					   ctx_len);
 		if (rc) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init outbound sa, rc=%d", rc);
@@ -281,6 +338,18 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
+		/* Always enable explicit IV.
+		 * Copy the IV from application only when iv_gen_disable flag is
+		 * set
+		 */
+		outb_sa->common_sa.ctl.explicit_iv_en = 1;
+
+		if (conf->ipsec.options.iv_gen_disable == 1) {
+			iv_str = getenv("ETH_SEC_IV_OVR");
+			if (iv_str)
+				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
+		}
+
 		/* Save userdata */
 		outb_priv->userdata = conf->userdata;
 		outb_priv->sa_idx = sa_idx;
@@ -288,8 +357,8 @@ cn9k_eth_sec_session_create(void *device,
 		/* Start sequence number with 1 */
 		outb_priv->seq = 1;
 
-		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
-		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+		memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
+		if (outb_sa->common_sa.ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
 			outb_priv->copy_salt = 1;
 
 		/* Save rlen info */
@@ -337,9 +406,9 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	struct roc_onf_ipsec_outb_sa *outb_sa;
-	struct roc_onf_ipsec_inb_sa *inb_sa;
 	struct cnxk_eth_sec_sess *eth_sec;
+	struct roc_ie_on_outb_sa *outb_sa;
+	struct roc_ie_on_inb_sa *inb_sa;
 	struct rte_mempool *mp;
 	rte_spinlock_t *lock;
 
@@ -353,14 +422,14 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 	if (eth_sec->inb) {
 		inb_sa = eth_sec->sa;
 		/* Disable SA */
-		inb_sa->ctl.valid = 0;
+		inb_sa->common_sa.ctl.valid = 0;
 
 		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
 		dev->inb.nb_sess--;
 	} else {
 		outb_sa = eth_sec->sa;
 		/* Disable SA */
-		outb_sa->ctl.valid = 0;
+		outb_sa->common_sa.ctl.valid = 0;
 
 		/* Release Outbound SA index */
 		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 25a4927..1a9f920 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -171,7 +171,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 }
 
 static inline int
-ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
+ipsec_antireplay_check(struct roc_ie_on_inb_sa *sa,
 		       struct cn9k_inb_priv_data *priv, uintptr_t data,
 		       uint32_t win_sz)
 {
@@ -183,7 +183,7 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	uint8_t esn;
 	int rc;
 
-	esn = sa->ctl.esn_en;
+	esn = sa->common_sa.ctl.esn_en;
 	seql = rte_be_to_cpu_32(*((uint32_t *)(data + IPSEC_SQ_LO_IDX)));
 
 	if (!esn) {
@@ -200,11 +200,12 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	rte_spinlock_lock(&ar->lock);
 	rc = cnxk_on_anti_replay_check(seq, ar, win_sz);
 	if (esn && !rc) {
-		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->esn_hi) << 32) |
-			    rte_be_to_cpu_32(sa->esn_low);
+		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->common_sa.seq_t.th)
+			     << 32) |
+			    rte_be_to_cpu_32(sa->common_sa.seq_t.tl);
 		if (seq > seq_in_sa) {
-			sa->esn_low = rte_cpu_to_be_32(seql);
-			sa->esn_hi = rte_cpu_to_be_32(seqh);
+			sa->common_sa.seq_t.tl = rte_cpu_to_be_32(seql);
+			sa->common_sa.seq_t.th = rte_cpu_to_be_32(seqh);
 		}
 	}
 	rte_spinlock_unlock(&ar->lock);
@@ -266,9 +267,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
 	struct cn9k_inb_priv_data *sa_priv;
-	struct roc_onf_ipsec_inb_sa *sa;
+	struct roc_ie_on_inb_sa *sa;
 	uint8_t lcptr = rx->lcptr;
-	struct rte_ipv4_hdr *ipv4;
+	struct rte_ipv4_hdr *ip;
+	struct rte_ipv6_hdr *ip6;
 	uint16_t data_off, res;
 	uint32_t spi, win_sz;
 	uint32_t spi_mask;
@@ -279,6 +281,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	res = *(uint64_t *)(res_sg0 + 8);
 	data_off = *rearm_val & (BIT_ULL(16) - 1);
 	data = (uintptr_t)m->buf_addr;
+
 	data += data_off;
 
 	rte_prefetch0((void *)data);
@@ -294,10 +297,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	sa_w = sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	spi_mask = (1ULL << sa_w) - 1;
-	sa = roc_nix_inl_onf_ipsec_inb_sa(sa_base, spi & spi_mask);
+	sa = roc_nix_inl_on_ipsec_inb_sa(sa_base, spi & spi_mask);
 
 	/* Update dynamic field with userdata */
-	sa_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(sa);
+	sa_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(sa);
 	dw = *(__uint128_t *)sa_priv;
 	*rte_security_dynfield(m) = (uint64_t)dw;
 
@@ -309,16 +312,26 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	}
 
 	/* Get total length from IPv4 header. We can assume only IPv4 */
-	ipv4 = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-				       ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	ip = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
+				     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+
+	if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
+	    IPVERSION) {
+		*len = rte_be_to_cpu_16(ip->total_length) + lcptr;
+	} else {
+		PLT_ASSERT(((ip->version_ihl & 0xf0) >>
+			    RTE_IPV4_IHL_MULTIPLIER) == 6);
+		ip6 = (struct rte_ipv6_hdr *)ip;
+		*len = rte_be_to_cpu_16(ip6->payload_len) +
+		       sizeof(struct rte_ipv6_hdr) + lcptr;
+	}
 
 	/* Update data offset */
-	data_off += (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-		     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	data_off +=
+		(ROC_ONF_IPSEC_INB_SPI_SEQ_SZ + ROC_ONF_IPSEC_INB_MAX_L2_SZ);
 	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
 	*rearm_val |= data_off;
 
-	*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
 	return RTE_MBUF_F_RX_SEC_OFFLOAD;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
index bfdbd1e..dd8b7a5 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
@@ -14,59 +14,47 @@
 static int
 copy_outb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_outb_sa *out_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_outb_sa *out_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	out_sa = (struct roc_onf_ipsec_outb_sa *)sa;
-	w0.ctl = out_sa->ctl;
+	out_sa = (struct roc_ie_on_outb_sa *)sa;
+	w0.ctl = out_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "outsa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
-	snprintf(str, sizeof(str), "outsa_src_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_src);
-
-	snprintf(str, sizeof(str), "outsa_dst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_dst);
-
-	snprintf(str, sizeof(str), "outsa_isrc_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_src);
-
-	snprintf(str, sizeof(str), "outsa_idst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_dst);
-
 	return 0;
 }
 
 static int
 copy_inb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_inb_sa *in_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_inb_sa *in_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	in_sa = (struct roc_onf_ipsec_inb_sa *)sa;
-	w0.ctl = in_sa->ctl;
+	in_sa = (struct roc_ie_on_inb_sa *)sa;
+	w0.ctl = in_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "insa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
 	snprintf(str, sizeof(str), "insa_esnh_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_hi);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.th);
 
 	snprintf(str, sizeof(str), "insa_esnl_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_low);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.tl);
 
 	return 0;
 }
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (11 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 13/23] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:48 ` [PATCH 15/23] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
                   ` (11 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add capabilities for HMAC_SHA2 and UDP encap for cn9k
security offload in inline mode.
Set explicit IV mode in IPsec context when IV is provided by the
application.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 79 ++++++++++++++++++++++++++++++++++----
 1 file changed, 71 insertions(+), 8 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 88b95fb..42ba04a 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -80,6 +80,66 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 24,
+					.max = 48,
+					.increment = 24
+					},
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 64,
+					.increment = 32
+				},
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -91,7 +151,9 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -103,7 +165,10 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1,
+					.iv_gen_disable = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -338,13 +403,11 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		/* Always enable explicit IV.
-		 * Copy the IV from application only when iv_gen_disable flag is
-		 * set
+		/* When IV is provided by the application,
+		 * copy the IV to context and enable explicit IV flag in context.
 		 */
-		outb_sa->common_sa.ctl.explicit_iv_en = 1;
-
-		if (conf->ipsec.options.iv_gen_disable == 1) {
+		if (ipsec->options.iv_gen_disable == 1) {
+			outb_sa->common_sa.ctl.explicit_iv_en = 1;
 			iv_str = getenv("ETH_SEC_IV_OVR");
 			if (iv_str)
 				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 15/23] common/cnxk: enable aging on CN10K platform
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (12 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
@ 2022-08-09 18:48 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
                   ` (10 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:48 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

This patch enables aging on the CNF105 variant of the CN10K platform.
It also enables aging statistics while dumping/resetting SQ statistics.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_errata.h    |  3 +--
 drivers/common/cnxk/roc_nix_debug.c | 19 +++++++++----------
 drivers/common/cnxk/roc_nix_stats.c |  2 ++
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 8dc372f..d3b32f1 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -30,8 +30,7 @@ roc_errata_npa_has_no_fc_stype_ststp(void)
 static inline bool
 roc_errata_nix_has_no_drop_aging(void)
 {
-	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0());
 }
 
 /* Errata IPBUNIXRX-40130 */
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index efac7e5..bd7a5d3 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -472,22 +472,21 @@ nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
 	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
 	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
-		 ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
+	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
+		 ctx->vfi_lso_vlan1_ins_ena);
 	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
-		 ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
+	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
+		 ctx->vfi_lso_sizem1);
 	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->scm_lso_rem);
+	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
 	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
 	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_pkts);
+	nix_dump("W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
+	nix_dump("W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
+	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 8fd5c71..2e5071e 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -238,6 +238,8 @@ nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
 		aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
 		aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
 		aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
+		aq->sq_mask.aged_drop_octs = ~(aq->sq_mask.aged_drop_octs);
+		aq->sq_mask.aged_drop_pkts = ~(aq->sq_mask.aged_drop_pkts);
 	}
 
 	rc = mbox_process(mbox);
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (13 preceding siblings ...)
  2022-08-09 18:48 ` [PATCH 15/23] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 17/23] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
                   ` (9 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Updated the shaper profile with a user-configurable RED algorithm.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h          | 1 +
 drivers/common/cnxk/roc_nix_tm_utils.c | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 5f5f5f9..8fd5990 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -566,6 +566,7 @@ struct roc_nix_tm_shaper_profile {
 	int32_t pkt_len_adj;
 	bool pkt_mode;
 	int8_t accuracy;
+	uint8_t red_algo;
 	/* Function to free this memory */
 	void (*free_fn)(void *profile);
 };
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index b9b605f..193f9df 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1236,11 +1236,14 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	struct nix_tm_shaper_profile *profile;
 	struct nix_tm_shaper_data cir, pir;
 
+	if (!roc_prof)
+		return;
+
 	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
-	tm_node->red_algo = NIX_REDALG_STD;
+	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (profile && roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 17/23] common/cnxk: add 98xx A1 platform
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (14 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 18/23] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
                   ` (8 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the 98xx A1 pass.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c |  1 +
 drivers/common/cnxk/roc_model.h | 16 +++++++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index b040bc0..326b85e 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -65,6 +65,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
+	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 1, ROC_MODEL_CN96xx_B0, "cn96xx_b0"},
 	{VENDOR_CAVIUM, PART_96xx, 2, 0, ROC_MODEL_CN96xx_C0, "cn96xx_c0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index d231d44..57a8af0 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -21,6 +21,7 @@ struct roc_model {
 #define ROC_MODEL_CNF95xxN_A1  BIT_ULL(14)
 #define ROC_MODEL_CNF95xxN_B0  BIT_ULL(15)
 #define ROC_MODEL_CN98xx_A0    BIT_ULL(16)
+#define ROC_MODEL_CN98xx_A1    BIT_ULL(17)
 #define ROC_MODEL_CN106xx_A0   BIT_ULL(20)
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
@@ -38,10 +39,11 @@ struct roc_model {
 } __plt_cache_aligned;
 
 #define ROC_MODEL_CN96xx_Ax (ROC_MODEL_CN96xx_A0 | ROC_MODEL_CN96xx_B0)
+#define ROC_MODEL_CN98xx_Ax (ROC_MODEL_CN98xx_A0 | ROC_MODEL_CN98xx_A1)
 #define ROC_MODEL_CN9K                                                         \
 	(ROC_MODEL_CN96xx_Ax | ROC_MODEL_CN96xx_C0 | ROC_MODEL_CNF95xx_A0 |    \
 	 ROC_MODEL_CNF95xx_B0 | ROC_MODEL_CNF95xxMM_A0 |                       \
-	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_A0 | \
+	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_Ax | \
 	 ROC_MODEL_CNF95xxN_A1 | ROC_MODEL_CNF95xxN_B0)
 #define ROC_MODEL_CNF9K                                                        \
 	(ROC_MODEL_CNF95xx_A0 | ROC_MODEL_CNF95xx_B0 |                         \
@@ -111,10 +113,22 @@ roc_model_is_cn10k(void)
 static inline uint64_t
 roc_model_is_cn98xx(void)
 {
+	return (roc_model->flag & ROC_MODEL_CN98xx_Ax);
+}
+
+static inline uint64_t
+roc_model_is_cn98xx_a0(void)
+{
 	return (roc_model->flag & ROC_MODEL_CN98xx_A0);
 }
 
 static inline uint64_t
+roc_model_is_cn98xx_a1(void)
+{
+	return (roc_model->flag & ROC_MODEL_CN98xx_A1);
+}
+
+static inline uint64_t
 roc_model_is_cn96_a0(void)
 {
 	return roc_model->flag & ROC_MODEL_CN96xx_A0;
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 18/23] net/cnxk: enable additional ciphers for inline
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (15 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 17/23] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
                   ` (7 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable the below ciphers and auth algorithms as part of the
capabilities for inline IPsec:
AES_XCBC_MAC
AES_GMAC

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 86 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 42ba04a..2dc9fe1 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -10,6 +10,27 @@
 #include <cnxk_security.h>
 
 static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -60,6 +81,71 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES-XCBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0,
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (16 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 18/23] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 20/23] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
                   ` (6 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable the 3DES-CBC cipher capability for inline IPsec.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  3 +++
 drivers/crypto/cnxk/cn9k_ipsec.c    |  6 ++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 21 ++++++++++++++++++++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 89ac900..a442549 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1033,6 +1033,9 @@ on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
 			case RTE_CRYPTO_CIPHER_NULL:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
 				break;
+			case RTE_CRYPTO_CIPHER_3DES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_3DES_CBC;
+				break;
 			case RTE_CRYPTO_CIPHER_AES_CBC:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
 				aes_key_len = cipher_xform->cipher.key.length;
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 8491558..3d37449 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -248,6 +248,12 @@ cn9k_ipsec_xform_verify(struct rte_security_ipsec_xform *ipsec,
 				plt_err("Transport mode AES-CBC AES-XCBC is not supported");
 				return -ENOTSUP;
 			}
+
+			if ((cipher->algo == RTE_CRYPTO_CIPHER_3DES_CBC) &&
+			    (auth->algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC)) {
+				plt_err("Transport mode 3DES-CBC AES-XCBC is not supported");
+				return -ENOTSUP;
+			}
 		}
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 2dc9fe1..9536a99 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -30,7 +30,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, },
 		}, }
 	},
-
+	{	/* 3DES CBC  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 8
+				}
+			}, },
+		}, }
+	},
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 20/23] net/cnxk: skip PFC configuration on LBK
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (17 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 21/23] common/cnxk: add support for CPT second pass Nithin Dabilpuram
                   ` (5 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

CNXK platforms do not support PFC on LBK, so skip the
configuration on LBK interfaces.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     | 2 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 02416ad..f08a20f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1859,7 +1859,7 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 		pfc_conf.tx_pause.rx_qid = i;
 		rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
 							      &pfc_conf);
-		if (rc)
+		if (rc && rc != -ENOTSUP)
 			plt_err("Failed to reset PFC. error code(%d)", rc);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 1592971..64beabd 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -356,8 +356,8 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
 		return -ENOTSUP;
 	}
 
-	if (roc_nix_is_sdp(nix)) {
-		plt_err("Prio flow ctrl config is not allowed on SDP");
+	if (roc_nix_is_sdp(nix) || roc_nix_is_lbk(nix)) {
+		plt_nix_dbg("Prio flow ctrl config is not allowed on SDP/LBK");
 		return -ENOTSUP;
 	}
 
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 21/23] common/cnxk: add support for CPT second pass
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (18 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 20/23] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 22/23] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
                   ` (4 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add a mailbox for masking and setting nix_rq_ctx
parameters and for enabling RQ masking in ipsec_cfg1,
so that the second pass is applied to all RQs.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/hw/nix.h      |  4 +-
 drivers/common/cnxk/roc_mbox.h    | 23 ++++++++++-
 drivers/common/cnxk/roc_nix_inl.c | 81 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 5863e35..a535264 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -1242,7 +1242,9 @@ struct nix_cn10k_rq_ctx_s {
 	uint64_t ipsech_ena : 1;
 	uint64_t ena_wqwd : 1;
 	uint64_t cq : 20;
-	uint64_t rsvd_36_24 : 13;
+	uint64_t rsvd_34_24 : 11;
+	uint64_t port_ol4_dis : 1;
+	uint64_t port_il4_dis : 1;
 	uint64_t lenerr_dis : 1;
 	uint64_t csum_il4_dis : 1;
 	uint64_t csum_ol4_dis : 1;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 912de11..688c70b 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -265,7 +265,9 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
 	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
-	  msg_req, nix_inline_ipsec_cfg)
+	  msg_req, nix_inline_ipsec_cfg)				       \
+	M(NIX_LF_INLINE_RQ_CFG, 0x8024, nix_lf_inline_rq_cfg,                  \
+	  nix_rq_cpt_field_mask_cfg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1088,6 +1090,25 @@ struct nix_mark_format_cfg_rsp {
 	uint8_t __io mark_format_idx;
 };
 
+struct nix_rq_cpt_field_mask_cfg_req {
+	struct mbox_msghdr hdr;
+#define RQ_CTX_MASK_MAX 6
+	union {
+		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_set;
+	};
+	union {
+		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_mask;
+	};
+	struct nix_lf_rx_ipec_cfg1_req {
+		uint32_t __io spb_cpt_aura;
+		uint8_t __io rq_mask_enable;
+		uint8_t __io spb_cpt_sizem1;
+		uint8_t __io spb_cpt_enable;
+	} ipsec_cfg1;
+};
+
 struct nix_lso_format_cfg {
 	struct mbox_msghdr hdr;
 	uint64_t __io field_mask;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index be0b806..cdf31b1 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -416,6 +416,70 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
 }
 
+static int
+nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint64_t aura_handle;
+	int rc = -ENOSPC;
+	int i;
+
+	if (!idev)
+		return rc;
+
+	inl_cfg = &idev->inl_cfg;
+	msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+	if (msk_req == NULL)
+		return rc;
+
+	for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+		msk_req->rq_ctx_word_mask[i] = 0xFFFFFFFFFFFFFFFF;
+
+	msk_req->rq_set.len_ol3_dis = 1;
+	msk_req->rq_set.len_ol4_dis = 1;
+	msk_req->rq_set.len_il3_dis = 1;
+
+	msk_req->rq_set.len_il4_dis = 1;
+	msk_req->rq_set.csum_ol4_dis = 1;
+	msk_req->rq_set.csum_il4_dis = 1;
+
+	msk_req->rq_set.lenerr_dis = 1;
+	msk_req->rq_set.port_ol4_dis = 1;
+	msk_req->rq_set.port_il4_dis = 1;
+
+	msk_req->rq_set.lpb_drop_ena = 0;
+	msk_req->rq_set.spb_drop_ena = 0;
+	msk_req->rq_set.xqe_drop_ena = 0;
+
+	msk_req->rq_mask.len_ol3_dis = ~(msk_req->rq_set.len_ol3_dis);
+	msk_req->rq_mask.len_ol4_dis = ~(msk_req->rq_set.len_ol4_dis);
+	msk_req->rq_mask.len_il3_dis = ~(msk_req->rq_set.len_il3_dis);
+
+	msk_req->rq_mask.len_il4_dis = ~(msk_req->rq_set.len_il4_dis);
+	msk_req->rq_mask.csum_ol4_dis = ~(msk_req->rq_set.csum_ol4_dis);
+	msk_req->rq_mask.csum_il4_dis = ~(msk_req->rq_set.csum_il4_dis);
+
+	msk_req->rq_mask.lenerr_dis = ~(msk_req->rq_set.lenerr_dis);
+	msk_req->rq_mask.port_ol4_dis = ~(msk_req->rq_set.port_ol4_dis);
+	msk_req->rq_mask.port_il4_dis = ~(msk_req->rq_set.port_il4_dis);
+
+	msk_req->rq_mask.lpb_drop_ena = ~(msk_req->rq_set.lpb_drop_ena);
+	msk_req->rq_mask.spb_drop_ena = ~(msk_req->rq_set.spb_drop_ena);
+	msk_req->rq_mask.xqe_drop_ena = ~(msk_req->rq_set.xqe_drop_ena);
+
+	aura_handle = roc_npa_zero_aura_handle();
+	msk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);
+	msk_req->ipsec_cfg1.rq_mask_enable = enable;
+	msk_req->ipsec_cfg1.spb_cpt_sizem1 = inl_cfg->buf_sz;
+	msk_req->ipsec_cfg1.spb_cpt_enable = enable;
+
+	return mbox_process(mbox);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -472,6 +536,14 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		nix->need_meta_aura = true;
 		idev->inl_cfg.refs++;
 	}
+
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, true);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -481,6 +553,7 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	int rc;
 
 	if (!nix->inl_inb_ena)
 		return 0;
@@ -496,6 +569,14 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 			nix_inl_meta_aura_destroy();
 	}
 
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, false);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
 
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 22/23] common/cnxk: add CQ limit associated with SQ
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (19 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 21/23] common/cnxk: add support for CPT second pass Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-09 18:49 ` [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
                   ` (3 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

Update the CQ threshold limit associated with the SQ.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       | 1 +
 drivers/common/cnxk/roc_nix_queue.c | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 8fd5990..2fddb20 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -340,6 +340,7 @@ struct roc_nix_sq {
 	uint32_t nb_desc;
 	uint16_t qid;
 	uint16_t cqid;
+	uint16_t cq_drop_thresh;
 	bool sso_ena;
 	bool cq_ena;
 	/* End of Input parameters */
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index b197de0..6030332 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -907,6 +907,7 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -1024,6 +1025,7 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (20 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 22/23] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
@ 2022-08-09 18:49 ` Nithin Dabilpuram
  2022-08-30  4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
                   ` (2 subsequent siblings)
  24 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-08-09 18:49 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

This patch adds RoC support for Tx completion events via
RQ to CQ mapping.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.c       | 5 ++++-
 drivers/common/cnxk/roc_nix.h       | 2 ++
 drivers/common/cnxk/roc_nix_queue.c | 7 ++-----
 drivers/net/cnxk/cnxk_ethdev.c      | 3 +++
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 151d8c3..4bb306b 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -154,7 +154,10 @@ roc_nix_lf_alloc(struct roc_nix *roc_nix, uint32_t nb_rxq, uint32_t nb_txq,
 		return rc;
 	req->rq_cnt = nb_rxq;
 	req->sq_cnt = nb_txq;
-	req->cq_cnt = nb_rxq;
+	if (roc_nix->tx_compl_ena)
+		req->cq_cnt = nb_rxq + nb_txq;
+	else
+		req->cq_cnt = nb_rxq;
 	/* XQESZ can be W64 or W16 */
 	req->xqe_sz = NIX_XQESZ_W16;
 	req->rss_sz = nix->reta_sz;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2fddb20..3366080 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -281,6 +281,7 @@ struct roc_nix_stats_queue {
 struct roc_nix_rq {
 	/* Input parameters */
 	uint16_t qid;
+	uint16_t cqid; /* Not valid when SSO is enabled */
 	uint16_t bpf_id;
 	uint64_t aura_handle;
 	bool ipsech_ena;
@@ -406,6 +407,7 @@ struct roc_nix {
 	uint16_t max_sqb_count;
 	enum roc_nix_rss_reta_sz reta_sz;
 	bool enable_loop;
+	bool tx_compl_ena;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
 	uint16_t sqb_slack;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 6030332..405d9a8 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -268,7 +268,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena)
@@ -395,7 +395,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena) {
@@ -644,9 +644,6 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 	if (cq == NULL)
 		return NIX_ERR_PARAM;
 
-	if (cq->qid >= nix->nb_rx_queues)
-		return NIX_ERR_QUEUE_INVALID_RANGE;
-
 	qsize = nix_qsize_clampup(cq->nb_desc);
 	cq->nb_desc = nix_qsize_to_val(qsize);
 	cq->qmask = cq->nb_desc - 1;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index f08a20f..eb562ec 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -606,6 +606,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Setup ROC RQ */
 	rq = &dev->rqs[qid];
 	rq->qid = qid;
+	rq->cqid = cq->qid;
 	rq->aura_handle = mp->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
@@ -1168,6 +1169,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	if (roc_nix_is_lbk(nix))
 		nix->enable_loop = eth_dev->data->dev_conf.lpbk_mode;
 
+	nix->tx_compl_ena = 0;
+
 	/* Alloc a nix lf */
 	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
 	if (rc) {
-- 
2.8.4


^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH 01/23] common/cnxk: fix part value for cn10k
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (21 preceding siblings ...)
  2022-08-09 18:49 ` [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
@ 2022-08-30  4:51 ` Jerin Jacob
  2022-08-30  5:16   ` [EXT] " Nithin Kumar Dabilpuram
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
  24 siblings, 1 reply; 89+ messages in thread
From: Jerin Jacob @ 2022-08-30  4:51 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Jerin Jacob,
	dpdk-dev, Harman Kalra

On Wed, Aug 10, 2022 at 12:19 AM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Harman Kalra <hkalra@marvell.com>
>
> Updating the logic for getting part and pass value for cn10k family,
> as device tree compatible logic does not work in VMs.
> Scanning all the PCI device and detect first RVU device, subsystem
> device file gives part no and revision file provide pass information.
>
> Fixes: 014a9e222bac ("common/cnxk: add model init and IO handling API")
>
> Signed-off-by: Harman Kalra <hkalra@marvell.com>
> ---
>
> Depends-on: series-23650("[v2] event/cnxk: add eth port specific PTP enable")
> Depends-on: series-24029("[1/4] cnxk/net: add fc check in vector event Tx path")

# In order to merge the series, we need to remove the dependency on the
event/cnxk patches, as both are going to different trees.
Could you just pull 1/4 of
https://patches.dpdk.org/project/dpdk/patch/20220719111125.8276-1-pbhagavatula@marvell.com/
and rebase this series on top of dpdk-next-net-mrvl and send the next version.


# Also, Please update git commit log in this series whenever there are
only one liners.



>
>  drivers/common/cnxk/roc_model.c    | 152 +++++++++++++++++++++++++++----------
>  drivers/common/cnxk/roc_platform.h |   3 +
>  2 files changed, 113 insertions(+), 42 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
> index a68baa6..791ffa6 100644
> --- a/drivers/common/cnxk/roc_model.c
> +++ b/drivers/common/cnxk/roc_model.c
> @@ -2,6 +2,7 @@
>   * Copyright(C) 2021 Marvell.
>   */
>
> +#include <dirent.h>
>  #include <fcntl.h>
>  #include <unistd.h>
>
> @@ -40,6 +41,16 @@ struct roc_model *roc_model;
>  #define MODEL_MINOR_SHIFT 0
>  #define MODEL_MINOR_MASK  ((1 << MODEL_MINOR_BITS) - 1)
>
> +#define MODEL_CN10K_PART_SHIFT 8
> +#define MODEL_CN10K_PASS_BITS  4
> +#define MODEL_CN10K_PASS_MASK  ((1 << MODEL_CN10K_PASS_BITS) - 1)
> +#define MODEL_CN10K_MAJOR_BITS 2
> +#define MODEL_CN10K_MAJOR_SHIFT 2
> +#define MODEL_CN10K_MAJOR_MASK ((1 << MODEL_CN10K_MAJOR_BITS) - 1)
> +#define MODEL_CN10K_MINOR_BITS 2
> +#define MODEL_CN10K_MINOR_SHIFT 0
> +#define MODEL_CN10K_MINOR_MASK ((1 << MODEL_CN10K_MINOR_BITS) - 1)
> +
>  static const struct model_db {
>         uint32_t impl;
>         uint32_t part;
> @@ -66,55 +77,101 @@ static const struct model_db {
>         {VENDOR_CAVIUM, PART_95xxMM, 0, 0, ROC_MODEL_CNF95xxMM_A0,
>          "cnf95xxmm_a0"}};
>
> -static uint32_t
> -cn10k_part_get(void)
> +/* Detect if RVU device */
> +static bool
> +is_rvu_device(unsigned long val)
>  {
> -       uint32_t soc = 0x0;
> -       char buf[BUFSIZ];
> -       char *ptr;
> -       FILE *fd;
> -
> -       /* Read the CPU compatible variant */
> -       fd = fopen("/proc/device-tree/compatible", "r");
> -       if (!fd) {
> -               plt_err("Failed to open /proc/device-tree/compatible");
> -               goto err;
> -       }
> +       return (val == PCI_DEVID_CNXK_RVU_PF || val == PCI_DEVID_CNXK_RVU_VF ||
> +               val == PCI_DEVID_CNXK_RVU_AF ||
> +               val == PCI_DEVID_CNXK_RVU_AF_VF ||
> +               val == PCI_DEVID_CNXK_RVU_NPA_PF ||
> +               val == PCI_DEVID_CNXK_RVU_NPA_VF ||
> +               val == PCI_DEVID_CNXK_RVU_SSO_TIM_PF ||
> +               val == PCI_DEVID_CNXK_RVU_SSO_TIM_VF ||
> +               val == PCI_DEVID_CN10K_RVU_CPT_PF ||
> +               val == PCI_DEVID_CN10K_RVU_CPT_VF);
> +}
>
> -       if (fgets(buf, sizeof(buf), fd) == NULL) {
> -               plt_err("Failed to read from /proc/device-tree/compatible");
> -               goto fclose;
> -       }
> -       ptr = strchr(buf, ',');
> -       if (!ptr) {
> -               plt_err("Malformed 'CPU compatible': <%s>", buf);
> -               goto fclose;
> -       }
> -       ptr++;
> -       if (strcmp("cn10ka", ptr) == 0) {
> -               soc = PART_106xx;
> -       } else if (strcmp("cnf10ka", ptr) == 0) {
> -               soc = PART_105xx;
> -       } else if (strcmp("cnf10kb", ptr) == 0) {
> -               soc = PART_105xxN;
> -       } else if (strcmp("cn10kb", ptr) == 0) {
> -               soc = PART_103xx;
> -       } else {
> -               plt_err("Unidentified 'CPU compatible': <%s>", ptr);
> -               goto fclose;
> +static int
> +rvu_device_lookup(const char *dirname, uint32_t *part, uint32_t *pass)
> +{
> +       char filename[PATH_MAX];
> +       unsigned long val;
> +
> +       /* Check if vendor id is cavium */
> +       snprintf(filename, sizeof(filename), "%s/vendor", dirname);
> +       if (plt_sysfs_value_parse(filename, &val) < 0)
> +               goto error;
> +
> +       if (val != PCI_VENDOR_ID_CAVIUM)
> +               goto error;
> +
> +       /* Get device id  */
> +       snprintf(filename, sizeof(filename), "%s/device", dirname);
> +       if (plt_sysfs_value_parse(filename, &val) < 0)
> +               goto error;
> +
> +       /* Check if device ID belongs to any RVU device */
> +       if (!is_rvu_device(val))
> +               goto error;
> +
> +       /* Get subsystem_device id */
> +       snprintf(filename, sizeof(filename), "%s/subsystem_device", dirname);
> +       if (plt_sysfs_value_parse(filename, &val) < 0)
> +               goto error;
> +
> +       *part = val >> MODEL_CN10K_PART_SHIFT;
> +
> +       /* Get revision for pass value*/
> +       snprintf(filename, sizeof(filename), "%s/revision", dirname);
> +       if (plt_sysfs_value_parse(filename, &val) < 0)
> +               goto error;
> +
> +       *pass = val & MODEL_CN10K_PASS_MASK;
> +
> +       return 0;
> +error:
> +       return -EINVAL;
> +}
> +
> +/* Scans through all PCI devices, detects RVU device and returns
> + * subsystem_device
> + */
> +static int
> +cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
> +{
> +#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
> +       char dirname[PATH_MAX];
> +       struct dirent *e;
> +       DIR *dir;
> +
> +       dir = opendir(SYSFS_PCI_DEVICES);
> +       if (dir == NULL) {
> +               plt_err("%s(): opendir failed: %s\n", __func__,
> +                       strerror(errno));
> +               return -errno;
>         }
>
> -fclose:
> -       fclose(fd);
> +       while ((e = readdir(dir)) != NULL) {
> +               if (e->d_name[0] == '.')
> +                       continue;
> +
> +               snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
> +                        e->d_name);
> +
> +               /* Lookup for rvu device and get part pass information */
> +               if (!rvu_device_lookup(dirname, part, pass))
> +                       break;
> +       }
>
> -err:
> -       return soc;
> +       closedir(dir);
> +       return 0;
>  }
>
>  static bool
>  populate_model(struct roc_model *model, uint32_t midr)
>  {
> -       uint32_t impl, major, part, minor;
> +       uint32_t impl, major, part, minor, pass;
>         bool found = false;
>         size_t i;
>
> @@ -124,8 +181,19 @@ populate_model(struct roc_model *model, uint32_t midr)
>         minor = (midr >> MODEL_MINOR_SHIFT) & MODEL_MINOR_MASK;
>
>         /* Update part number for cn10k from device-tree */
> -       if (part == SOC_PART_CN10K)
> -               part = cn10k_part_get();
> +       if (part == SOC_PART_CN10K) {
> +               if (cn10k_part_pass_get(&part, &pass))
> +                       goto not_found;
> +               /*
> +                * Pass value format:
> +                * Bits 0..1: minor pass
> +                * Bits 3..2: major pass
> +                */
> +               minor = (pass >> MODEL_CN10K_MINOR_SHIFT) &
> +                       MODEL_CN10K_MINOR_MASK;
> +               major = (pass >> MODEL_CN10K_MAJOR_SHIFT) &
> +                       MODEL_CN10K_MAJOR_MASK;
> +       }
>
>         for (i = 0; i < PLT_DIM(model_db); i++)
>                 if (model_db[i].impl == impl && model_db[i].part == part &&
> @@ -136,7 +204,7 @@ populate_model(struct roc_model *model, uint32_t midr)
>                         found = true;
>                         break;
>                 }
> -
> +not_found:
>         if (!found) {
>                 model->flag = 0;
>                 strncpy(model->name, "unknown", ROC_MODEL_STR_LEN_MAX - 1);
> diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
> index 502f243..3e7adfc 100644
> --- a/drivers/common/cnxk/roc_platform.h
> +++ b/drivers/common/cnxk/roc_platform.h
> @@ -24,6 +24,8 @@
>  #include <rte_tailq.h>
>  #include <rte_telemetry.h>
>
> +#include "eal_filesystem.h"
> +
>  #include "roc_bits.h"
>
>  #if defined(__ARM_FEATURE_SVE)
> @@ -94,6 +96,7 @@
>  #define plt_pci_device             rte_pci_device
>  #define plt_pci_read_config        rte_pci_read_config
>  #define plt_pci_find_ext_capability rte_pci_find_ext_capability
> +#define plt_sysfs_value_parse      eal_parse_sysfs_value
>
>  #define plt_log2_u32    rte_log2_u32
>  #define plt_cpu_to_be_16 rte_cpu_to_be_16
> --
> 2.8.4
>

^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [EXT] Re: [PATCH 01/23] common/cnxk: fix part value for cn10k
  2022-08-30  4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
@ 2022-08-30  5:16   ` Nithin Kumar Dabilpuram
  0 siblings, 0 replies; 89+ messages in thread
From: Nithin Kumar Dabilpuram @ 2022-08-30  5:16 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Kiran Kumar Kokkilagadda, Sunil Kumar Kori,
	Satha Koteswara Rao Kottidi, Jerin Jacob Kollanukkaran, dpdk-dev,
	Harman Kalra


On 2022-08-30 10:21 AM, Jerin Jacob wrote:
> External Email
>
> ----------------------------------------------------------------------
> On Wed, Aug 10, 2022 at 12:19 AM Nithin Dabilpuram
> <ndabilpuram@marvell.com> wrote:
>> From: Harman Kalra <hkalra@marvell.com>
>>
>> Updating the logic for getting part and pass value for cn10k family,
>> as device tree compatible logic does not work in VMs.
>> Scanning all the PCI device and detect first RVU device, subsystem
>> device file gives part no and revision file provide pass information.
>>
>> Fixes: 014a9e222bac ("common/cnxk: add model init and IO handling API")
>>
>> Signed-off-by: Harman Kalra <hkalra@marvell.com>
>> ---
>>
>> Depends-on: series-23650("[v2] event/cnxk: add eth port specific PTP enable")
>> Depends-on: series-24029("[1/4] cnxk/net: add fc check in vector event Tx path")
> # In order to merge the series, We need to remove the dependency with
> event/cnxk patches as both are going to different trees.
> Could you just pull 1/4 of
> https://urldefense.proofpoint.com/v2/url?u=https-3A__patches.dpdk.org_project_dpdk_patch_20220719111125.8276-2D1-2Dpbhagavatula-40marvell.com_&d=DwIBaQ&c=nKjWec2b6R0mOyPaz7xtfQ&r=FZ_tPCbgFOh18zwRPO9H0yDx8VW38vuapifdDfc8SFQ&m=gZSFnEc3tLi5Mfx1zeON2drnvKyS7iBqBxed7QZ3oeTBluipIC5rKPgM4ZS_dNhG&s=tXO3s0scoNyA6XdphxZc7BEa5F8AD22dnr0XdrjSNds&e=
> and rebase this series on top of dpdk-next-net-mrvl and send the next version.
>
>
> # Also, Please update git commit log in this series whenever there are
> only one liners.
>
Ack, will send v2.
>
>>   drivers/common/cnxk/roc_model.c    | 152 +++++++++++++++++++++++++++----------
>>   drivers/common/cnxk/roc_platform.h |   3 +
>>   2 files changed, 113 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
>> index a68baa6..791ffa6 100644
>> --- a/drivers/common/cnxk/roc_model.c
>> +++ b/drivers/common/cnxk/roc_model.c
>> @@ -2,6 +2,7 @@
>>    * Copyright(C) 2021 Marvell.
>>    */
>>
>> +#include <dirent.h>
>>   #include <fcntl.h>
>>   #include <unistd.h>
>>
>> @@ -40,6 +41,16 @@ struct roc_model *roc_model;
>>   #define MODEL_MINOR_SHIFT 0
>>   #define MODEL_MINOR_MASK  ((1 << MODEL_MINOR_BITS) - 1)
>>
>> +#define MODEL_CN10K_PART_SHIFT 8
>> +#define MODEL_CN10K_PASS_BITS  4
>> +#define MODEL_CN10K_PASS_MASK  ((1 << MODEL_CN10K_PASS_BITS) - 1)
>> +#define MODEL_CN10K_MAJOR_BITS 2
>> +#define MODEL_CN10K_MAJOR_SHIFT 2
>> +#define MODEL_CN10K_MAJOR_MASK ((1 << MODEL_CN10K_MAJOR_BITS) - 1)
>> +#define MODEL_CN10K_MINOR_BITS 2
>> +#define MODEL_CN10K_MINOR_SHIFT 0
>> +#define MODEL_CN10K_MINOR_MASK ((1 << MODEL_CN10K_MINOR_BITS) - 1)
>> +
>>   static const struct model_db {
>>          uint32_t impl;
>>          uint32_t part;
>> @@ -66,55 +77,101 @@ static const struct model_db {
>>          {VENDOR_CAVIUM, PART_95xxMM, 0, 0, ROC_MODEL_CNF95xxMM_A0,
>>           "cnf95xxmm_a0"}};
>>
>> -static uint32_t
>> -cn10k_part_get(void)
>> +/* Detect if RVU device */
>> +static bool
>> +is_rvu_device(unsigned long val)
>>   {
>> -       uint32_t soc = 0x0;
>> -       char buf[BUFSIZ];
>> -       char *ptr;
>> -       FILE *fd;
>> -
>> -       /* Read the CPU compatible variant */
>> -       fd = fopen("/proc/device-tree/compatible", "r");
>> -       if (!fd) {
>> -               plt_err("Failed to open /proc/device-tree/compatible");
>> -               goto err;
>> -       }
>> +       return (val == PCI_DEVID_CNXK_RVU_PF || val == PCI_DEVID_CNXK_RVU_VF ||
>> +               val == PCI_DEVID_CNXK_RVU_AF ||
>> +               val == PCI_DEVID_CNXK_RVU_AF_VF ||
>> +               val == PCI_DEVID_CNXK_RVU_NPA_PF ||
>> +               val == PCI_DEVID_CNXK_RVU_NPA_VF ||
>> +               val == PCI_DEVID_CNXK_RVU_SSO_TIM_PF ||
>> +               val == PCI_DEVID_CNXK_RVU_SSO_TIM_VF ||
>> +               val == PCI_DEVID_CN10K_RVU_CPT_PF ||
>> +               val == PCI_DEVID_CN10K_RVU_CPT_VF);
>> +}
>>
>> -       if (fgets(buf, sizeof(buf), fd) == NULL) {
>> -               plt_err("Failed to read from /proc/device-tree/compatible");
>> -               goto fclose;
>> -       }
>> -       ptr = strchr(buf, ',');
>> -       if (!ptr) {
>> -               plt_err("Malformed 'CPU compatible': <%s>", buf);
>> -               goto fclose;
>> -       }
>> -       ptr++;
>> -       if (strcmp("cn10ka", ptr) == 0) {
>> -               soc = PART_106xx;
>> -       } else if (strcmp("cnf10ka", ptr) == 0) {
>> -               soc = PART_105xx;
>> -       } else if (strcmp("cnf10kb", ptr) == 0) {
>> -               soc = PART_105xxN;
>> -       } else if (strcmp("cn10kb", ptr) == 0) {
>> -               soc = PART_103xx;
>> -       } else {
>> -               plt_err("Unidentified 'CPU compatible': <%s>", ptr);
>> -               goto fclose;
>> +static int
>> +rvu_device_lookup(const char *dirname, uint32_t *part, uint32_t *pass)
>> +{
>> +       char filename[PATH_MAX];
>> +       unsigned long val;
>> +
>> +       /* Check if vendor id is cavium */
>> +       snprintf(filename, sizeof(filename), "%s/vendor", dirname);
>> +       if (plt_sysfs_value_parse(filename, &val) < 0)
>> +               goto error;
>> +
>> +       if (val != PCI_VENDOR_ID_CAVIUM)
>> +               goto error;
>> +
>> +       /* Get device id  */
>> +       snprintf(filename, sizeof(filename), "%s/device", dirname);
>> +       if (plt_sysfs_value_parse(filename, &val) < 0)
>> +               goto error;
>> +
>> +       /* Check if device ID belongs to any RVU device */
>> +       if (!is_rvu_device(val))
>> +               goto error;
>> +
>> +       /* Get subsystem_device id */
>> +       snprintf(filename, sizeof(filename), "%s/subsystem_device", dirname);
>> +       if (plt_sysfs_value_parse(filename, &val) < 0)
>> +               goto error;
>> +
>> +       *part = val >> MODEL_CN10K_PART_SHIFT;
>> +
>> +       /* Get revision for pass value*/
>> +       snprintf(filename, sizeof(filename), "%s/revision", dirname);
>> +       if (plt_sysfs_value_parse(filename, &val) < 0)
>> +               goto error;
>> +
>> +       *pass = val & MODEL_CN10K_PASS_MASK;
>> +
>> +       return 0;
>> +error:
>> +       return -EINVAL;
>> +}
>> +
>> +/* Scans through all PCI devices, detects RVU device and returns
>> + * subsystem_device
>> + */
>> +static int
>> +cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
>> +{
>> +#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
>> +       char dirname[PATH_MAX];
>> +       struct dirent *e;
>> +       DIR *dir;
>> +
>> +       dir = opendir(SYSFS_PCI_DEVICES);
>> +       if (dir == NULL) {
>> +               plt_err("%s(): opendir failed: %s\n", __func__,
>> +                       strerror(errno));
>> +               return -errno;
>>          }
>>
>> -fclose:
>> -       fclose(fd);
>> +       while ((e = readdir(dir)) != NULL) {
>> +               if (e->d_name[0] == '.')
>> +                       continue;
>> +
>> +               snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
>> +                        e->d_name);
>> +
>> +               /* Lookup for rvu device and get part pass information */
>> +               if (!rvu_device_lookup(dirname, part, pass))
>> +                       break;
>> +       }
>>
>> -err:
>> -       return soc;
>> +       closedir(dir);
>> +       return 0;
>>   }
>>
>>   static bool
>>   populate_model(struct roc_model *model, uint32_t midr)
>>   {
>> -       uint32_t impl, major, part, minor;
>> +       uint32_t impl, major, part, minor, pass;
>>          bool found = false;
>>          size_t i;
>>
>> @@ -124,8 +181,19 @@ populate_model(struct roc_model *model, uint32_t midr)
>>          minor = (midr >> MODEL_MINOR_SHIFT) & MODEL_MINOR_MASK;
>>
>>          /* Update part number for cn10k from device-tree */
>> -       if (part == SOC_PART_CN10K)
>> -               part = cn10k_part_get();
>> +       if (part == SOC_PART_CN10K) {
>> +               if (cn10k_part_pass_get(&part, &pass))
>> +                       goto not_found;
>> +               /*
>> +                * Pass value format:
>> +                * Bits 0..1: minor pass
>> +                * Bits 3..2: major pass
>> +                */
>> +               minor = (pass >> MODEL_CN10K_MINOR_SHIFT) &
>> +                       MODEL_CN10K_MINOR_MASK;
>> +               major = (pass >> MODEL_CN10K_MAJOR_SHIFT) &
>> +                       MODEL_CN10K_MAJOR_MASK;
>> +       }
>>
>>          for (i = 0; i < PLT_DIM(model_db); i++)
>>                  if (model_db[i].impl == impl && model_db[i].part == part &&
>> @@ -136,7 +204,7 @@ populate_model(struct roc_model *model, uint32_t midr)
>>                          found = true;
>>                          break;
>>                  }
>> -
>> +not_found:
>>          if (!found) {
>>                  model->flag = 0;
>>                  strncpy(model->name, "unknown", ROC_MODEL_STR_LEN_MAX - 1);
>> diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
>> index 502f243..3e7adfc 100644
>> --- a/drivers/common/cnxk/roc_platform.h
>> +++ b/drivers/common/cnxk/roc_platform.h
>> @@ -24,6 +24,8 @@
>>   #include <rte_tailq.h>
>>   #include <rte_telemetry.h>
>>
>> +#include "eal_filesystem.h"
>> +
>>   #include "roc_bits.h"
>>
>>   #if defined(__ARM_FEATURE_SVE)
>> @@ -94,6 +96,7 @@
>>   #define plt_pci_device             rte_pci_device
>>   #define plt_pci_read_config        rte_pci_read_config
>>   #define plt_pci_find_ext_capability rte_pci_find_ext_capability
>> +#define plt_sysfs_value_parse      eal_parse_sysfs_value
>>
>>   #define plt_log2_u32    rte_log2_u32
>>   #define plt_cpu_to_be_16 rte_cpu_to_be_16
>> --
>> 2.8.4
>>

^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (22 preceding siblings ...)
  2022-08-30  4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
@ 2022-09-05 13:31 ` Nithin Dabilpuram
  2022-09-05 13:31   ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                     ` (29 more replies)
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
  24 siblings, 30 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:31 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add an FC check in the vector event Tx path; the check needs to be
performed after the head wait, right before the LMTST is issued.
Since SQB pool FC updates are delayed w.r.t. the actual
utilization of the pool, add sufficient slack to avoid overflow.

Added a new device argument to override the default SQB slack
configured; it can be used as follows:

    -a 0002:02:00.0,sqb_slack=32

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---

v2: 
- Included this patch from series 24029 as suggested by Jerin to resolve
  compilation dependency with event dev.
- Fixed one-liner commit messages
- Added few more patches for upstream.

 doc/guides/nics/cnxk.rst                 | 12 +++++++
 drivers/common/cnxk/roc_nix.h            |  7 ++--
 drivers/common/cnxk/roc_nix_priv.h       |  1 -
 drivers/common/cnxk/roc_nix_queue.c      | 21 +++++------
 drivers/common/cnxk/roc_nix_tm.c         |  2 +-
 drivers/common/cnxk/roc_nix_tm_ops.c     |  4 +--
 drivers/event/cnxk/cn10k_eventdev.c      |  3 +-
 drivers/event/cnxk/cn9k_eventdev.c       |  3 +-
 drivers/event/cnxk/cn9k_worker.h         |  4 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  9 ++---
 drivers/net/cnxk/cn10k_tx.h              | 46 ++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev_devargs.c   |  8 ++++-
 12 files changed, 97 insertions(+), 23 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index e24eaa8bc4..eeaa3fa1cc 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -157,6 +157,18 @@ Runtime Config Options
    With the above configuration, each send queue's descriptor buffer count is
    limited to a maximum of 64 buffers.
 
+- ``SQB slack count`` (default ``12``)
+
+   Send queue descriptor slack count added to SQB count when a Tx queue is
+   created, can be set using ``sqb_slack`` ``devargs`` parameter.
+
+   For example::
+
+      -a 0002:02:00.0,sqb_slack=32
+
+   With the above configuration, each send queue's descriptor buffer count will
+   be increased by 32, while keeping the queue limit to default configuration.
+
 - ``Switch header enable`` (default ``none``)
 
    A port can be configured to a specific switch header type by using
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 4671f80e7c..c9aaedc915 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,6 +13,8 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
+#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_SLACK	      12U
 
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
@@ -410,19 +412,20 @@ struct roc_nix {
 	bool enable_loop;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
-	uint32_t outb_nb_desc;
+	uint16_t sqb_slack;
 	uint16_t outb_nb_crypto_qs;
+	uint32_t outb_nb_desc;
 	uint32_t ipsec_in_min_spi;
 	uint32_t ipsec_in_max_spi;
 	uint32_t ipsec_out_max_sa;
 	bool ipsec_out_sso_pffunc;
+	bool custom_sa_action;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
 	bool io_enabled;
 	bool rx_ptp_ena;
 	uint16_t cints;
-	bool custom_sa_action;
 
 #define ROC_NIX_MEM_SZ (6 * 1024)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5b0522c8cb..a3d4ddf5d5 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -13,7 +13,6 @@
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
 #define NIX_SQB_LIST_SPACE   ((uint16_t)2)
-#define NIX_SQB_LOWER_THRESH ((uint16_t)70)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index fa4c954631..692b13415a 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -682,12 +682,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
-	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
+	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
 	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
 	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
 	/* Clamp up the SQB count */
 	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+			      PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
@@ -695,8 +695,9 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		nb_sqb_bufs -
 		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
 	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
+		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
 
+	nb_sqb_bufs += roc_nix->sqb_slack;
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
@@ -711,12 +712,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		aura.fc_stype = 0x3; /* STSTP */
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
-	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
+	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
 				 &pool);
 	if (rc)
 		goto fail;
 
-	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
+	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
 		goto nomem;
@@ -724,21 +725,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 
 	/* Fill the initial buffers */
 	iova = (uint64_t)sq->sqe_mem;
-	for (count = 0; count < NIX_MAX_SQB; count++) {
+	for (count = 0; count < nb_sqb_bufs; count++) {
 		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
 		iova += blk_sz;
 	}
 
-	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
-	    NIX_MAX_SQB) {
+	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
+	    nb_sqb_bufs) {
 		plt_err("Failed to free all pointers to the pool");
 		rc = NIX_ERR_NO_MEM;
 		goto npa_fail;
 	}
 
 	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
-	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
-	sq->aura_sqb_bufs = NIX_MAX_SQB;
+	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
+	sq->aura_sqb_bufs = nb_sqb_bufs;
 
 	return rc;
 npa_fail:
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a31abded1a..81d491a3fd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -594,7 +594,7 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
 
 		/* SQ reached quiescent state */
 		if (sqb_cnt <= 1 && head_off == tail_off &&
-		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
+		    (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs)) {
 			break;
 		}
 
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 4aa55002fe..7036495ad8 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -67,7 +67,7 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 	if (enable)
 		*(volatile uint64_t *)sq->fc = rsp->aura.count;
 	else
-		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
+		*(volatile uint64_t *)sq->fc = sq->aura_sqb_bufs;
 	/* Sync write barrier */
 	plt_wmb();
 	return 0;
@@ -535,7 +535,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		tail_off = (val >> 28) & 0x3F;
 
 		if (sqb_cnt > 1 || head_off != tail_off ||
-		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
+		    (*(uint64_t *)sq->fc != sq->aura_sqb_bufs))
 			plt_err("Failed to gracefully flush sq %u", sq->qid);
 	}
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 0be7ebfe29..fee01713b4 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -812,7 +812,8 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 8ade30f84b..992a2a555c 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1043,7 +1043,8 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 54b3545022..d86cb94a77 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -761,6 +761,10 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
+	if (((txq->nb_sqb_bufs_adj -
+	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
+	     << txq->sqes_per_sqb_log2) <= 0)
+		return 0;
 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
 	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
 			      txq->mark_fmt);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index b4fd821912..7937cadd25 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -351,14 +351,15 @@ cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
 {
 	int rc;
 
-	if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+	if (sq->aura_sqb_bufs != nb_sqb_bufs) {
 		rc = roc_npa_aura_limit_modify(
 			sq->aura_handle,
 			RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
 		if (rc < 0)
 			return rc;
 
-		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs) -
+				  sq->roc_nix->sqb_slack;
 	}
 	return 0;
 }
@@ -556,7 +557,7 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
 	} else {
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
 		if (ret < 0)
@@ -588,7 +589,7 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 							     i);
 	} else {
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
 		if (ret < 0)
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index ea13866b20..8056510589 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -54,6 +54,31 @@
 
 #define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
 
+static __plt_always_inline void
+cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
+{
+	int64_t cached, refill;
+
+retry:
+	while (__atomic_load_n(&txq->fc_cache_pkts, __ATOMIC_RELAXED) < 0)
+		;
+	cached = __atomic_sub_fetch(&txq->fc_cache_pkts, req, __ATOMIC_ACQUIRE);
+	/* Check if there is enough space, else update and retry. */
+	if (cached < 0) {
+		/* Check if we have space else retry. */
+		do {
+			refill =
+				(txq->nb_sqb_bufs_adj -
+				 __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+				<< txq->sqes_per_sqb_log2;
+		} while (refill <= 0);
+		__atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
+					  0, __ATOMIC_RELEASE,
+					  __ATOMIC_RELAXED);
+		goto retry;
+	}
+}
+
 /* Function to determine no of tx subdesc required in case ext
  * sub desc is enabled.
  */
@@ -1039,6 +1064,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= (15ULL << 12);
 		data |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 
@@ -1048,6 +1075,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 17)) << 12;
 		data |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data, pa);
 	} else if (burst) {
@@ -1057,6 +1086,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 1)) << 12;
 		data |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 	}
@@ -1188,6 +1219,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= (15ULL << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 
@@ -1197,6 +1230,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data1 |= ((uint64_t)(burst - 17)) << 12;
 		data1 |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data1, pa1);
 	} else if (burst) {
@@ -1207,6 +1242,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= ((burst - 1) << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 	}
@@ -2735,6 +2772,9 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= (15ULL << 12);
 		wd.data[0] |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				cn10k_nix_pkts_per_vec_brst(flags) >> 1);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 
@@ -2750,6 +2790,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
 		wd.data[1] |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				burst - (cn10k_nix_pkts_per_vec_brst(flags) >>
+					 1));
 		/* STEOR1 */
 		roc_lmt_submit_steorl(wd.data[1], pa);
 	} else if (lnum) {
@@ -2765,6 +2809,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
 		wd.data[0] |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 	}
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 248582e1f6..4ded850622 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -246,6 +246,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)
 #define CNXK_SDP_CHANNEL_MASK	"sdp_channel_mask"
 #define CNXK_FLOW_PRE_L2_INFO	"flow_pre_l2_info"
 #define CNXK_CUSTOM_SA_ACT	"custom_sa_act"
+#define CNXK_SQB_SLACK		"sqb_slack"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -254,6 +255,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB;
 	struct flow_pre_l2_size_info pre_l2_info;
 	uint32_t ipsec_in_max_spi = BIT(8) - 1;
+	uint16_t sqb_slack = ROC_NIX_SQB_SLACK;
 	uint32_t ipsec_out_max_sa = BIT(12);
 	uint16_t flow_prealloc_size = 1;
 	uint16_t switch_header_type = 0;
@@ -311,6 +313,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 			   &parse_pre_l2_hdr_info, &pre_l2_info);
 	rte_kvargs_process(kvlist, CNXK_CUSTOM_SA_ACT, &parse_flag,
 			   &custom_sa_act);
+	rte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count,
+			   &sqb_slack);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -328,6 +332,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	dev->nix.reta_sz = reta_sz;
 	dev->nix.lock_rx_ctx = lock_rx_ctx;
 	dev->nix.custom_sa_action = custom_sa_act;
+	dev->nix.sqb_slack = sqb_slack;
 	dev->npc.flow_prealloc_size = flow_prealloc_size;
 	dev->npc.flow_max_priority = flow_max_priority;
 	dev->npc.switch_header_type = switch_header_type;
@@ -356,4 +361,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
 			      CNXK_NO_INL_DEV "=0"
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
-			      CNXK_CUSTOM_SA_ACT "=1");
+			      CNXK_CUSTOM_SA_ACT "=1"
+			      CNXK_SQB_SLACK "=<12-512>");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 02/31] common/cnxk: fix part value for cn10k
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
@ 2022-09-05 13:31   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
                     ` (28 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:31 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Update the logic for getting the part and pass value for the cn10k
family, as the device-tree "compatible" logic does not work in VMs.
Scan all PCI devices and detect the first RVU device; its subsystem
device file gives the part number and its revision file provides the
pass information.

Fixes: 014a9e222bac ("common/cnxk: add model init and IO handling API")

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c    | 152 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_platform.h |   3 +
 2 files changed, 113 insertions(+), 42 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index c934a10509..626de60fb0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include <dirent.h>
 #include <fcntl.h>
 #include <unistd.h>
 
@@ -40,6 +41,16 @@ struct roc_model *roc_model;
 #define MODEL_MINOR_SHIFT 0
 #define MODEL_MINOR_MASK  ((1 << MODEL_MINOR_BITS) - 1)
 
+#define MODEL_CN10K_PART_SHIFT	8
+#define MODEL_CN10K_PASS_BITS	4
+#define MODEL_CN10K_PASS_MASK	((1 << MODEL_CN10K_PASS_BITS) - 1)
+#define MODEL_CN10K_MAJOR_BITS	2
+#define MODEL_CN10K_MAJOR_SHIFT 2
+#define MODEL_CN10K_MAJOR_MASK	((1 << MODEL_CN10K_MAJOR_BITS) - 1)
+#define MODEL_CN10K_MINOR_BITS	2
+#define MODEL_CN10K_MINOR_SHIFT 0
+#define MODEL_CN10K_MINOR_MASK	((1 << MODEL_CN10K_MINOR_BITS) - 1)
+
 static const struct model_db {
 	uint32_t impl;
 	uint32_t part;
@@ -66,55 +77,101 @@ static const struct model_db {
 	{VENDOR_CAVIUM, PART_95xxMM, 0, 0, ROC_MODEL_CNF95xxMM_A0,
 	 "cnf95xxmm_a0"}};
 
-static uint32_t
-cn10k_part_get(void)
+/* Detect if RVU device */
+static bool
+is_rvu_device(unsigned long val)
 {
-	uint32_t soc = 0x0;
-	char buf[BUFSIZ];
-	char *ptr;
-	FILE *fd;
-
-	/* Read the CPU compatible variant */
-	fd = fopen("/proc/device-tree/compatible", "r");
-	if (!fd) {
-		plt_err("Failed to open /proc/device-tree/compatible");
-		goto err;
-	}
+	return (val == PCI_DEVID_CNXK_RVU_PF || val == PCI_DEVID_CNXK_RVU_VF ||
+		val == PCI_DEVID_CNXK_RVU_AF ||
+		val == PCI_DEVID_CNXK_RVU_AF_VF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_PF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_VF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_PF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_VF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_PF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_VF);
+}
 
-	if (fgets(buf, sizeof(buf), fd) == NULL) {
-		plt_err("Failed to read from /proc/device-tree/compatible");
-		goto fclose;
-	}
-	ptr = strchr(buf, ',');
-	if (!ptr) {
-		plt_err("Malformed 'CPU compatible': <%s>", buf);
-		goto fclose;
-	}
-	ptr++;
-	if (strcmp("cn10ka", ptr) == 0) {
-		soc = PART_106xx;
-	} else if (strcmp("cnf10ka", ptr) == 0) {
-		soc = PART_105xx;
-	} else if (strcmp("cnf10kb", ptr) == 0) {
-		soc = PART_105xxN;
-	} else if (strcmp("cn10kb", ptr) == 0) {
-		soc = PART_103xx;
-	} else {
-		plt_err("Unidentified 'CPU compatible': <%s>", ptr);
-		goto fclose;
+static int
+rvu_device_lookup(const char *dirname, uint32_t *part, uint32_t *pass)
+{
+	char filename[PATH_MAX];
+	unsigned long val;
+
+	/* Check if vendor id is cavium */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	if (val != PCI_VENDOR_ID_CAVIUM)
+		goto error;
+
+	/* Get device id  */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	/* Check if device ID belongs to any RVU device */
+	if (!is_rvu_device(val))
+		goto error;
+
+	/* Get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*part = val >> MODEL_CN10K_PART_SHIFT;
+
+	/* Get revision for pass value*/
+	snprintf(filename, sizeof(filename), "%s/revision", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*pass = val & MODEL_CN10K_PASS_MASK;
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
+/* Scans through all PCI devices, detects RVU device and returns
+ * subsystem_device
+ */
+static int
+cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
+{
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+	char dirname[PATH_MAX];
+	struct dirent *e;
+	DIR *dir;
+
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL) {
+		plt_err("%s(): opendir failed: %s\n", __func__,
+			strerror(errno));
+		return -errno;
 	}
 
-fclose:
-	fclose(fd);
+	while ((e = readdir(dir)) != NULL) {
+		if (e->d_name[0] == '.')
+			continue;
+
+		snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
+			 e->d_name);
+
+		/* Lookup for rvu device and get part pass information */
+		if (!rvu_device_lookup(dirname, part, pass))
+			break;
+	}
 
-err:
-	return soc;
+	closedir(dir);
+	return 0;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor;
+	uint32_t impl, major, part, minor, pass;
 	bool found = false;
 	size_t i;
 
@@ -124,8 +181,19 @@ populate_model(struct roc_model *model, uint32_t midr)
 	minor = (midr >> MODEL_MINOR_SHIFT) & MODEL_MINOR_MASK;
 
 	/* Update part number for cn10k from device-tree */
-	if (part == SOC_PART_CN10K)
-		part = cn10k_part_get();
+	if (part == SOC_PART_CN10K) {
+		if (cn10k_part_pass_get(&part, &pass))
+			goto not_found;
+		/*
+		 * Pass value format:
+		 * Bits 0..1: minor pass
+		 * Bits 3..2: major pass
+		 */
+		minor = (pass >> MODEL_CN10K_MINOR_SHIFT) &
+			MODEL_CN10K_MINOR_MASK;
+		major = (pass >> MODEL_CN10K_MAJOR_SHIFT) &
+			MODEL_CN10K_MAJOR_MASK;
+	}
 
 	for (i = 0; i < PLT_DIM(model_db); i++)
 		if (model_db[i].impl == impl && model_db[i].part == part &&
@@ -136,7 +204,7 @@ populate_model(struct roc_model *model, uint32_t midr)
 			found = true;
 			break;
 		}
-
+not_found:
 	if (!found) {
 		model->flag = 0;
 		strncpy(model->name, "unknown", ROC_MODEL_STR_LEN_MAX - 1);
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 502f243a81..3e7adfc5b8 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -24,6 +24,8 @@
 #include <rte_tailq.h>
 #include <rte_telemetry.h>
 
+#include "eal_filesystem.h"
+
 #include "roc_bits.h"
 
 #if defined(__ARM_FEATURE_SVE)
@@ -94,6 +96,7 @@
 #define plt_pci_device		    rte_pci_device
 #define plt_pci_read_config	    rte_pci_read_config
 #define plt_pci_find_ext_capability rte_pci_find_ext_capability
+#define plt_sysfs_value_parse	    eal_parse_sysfs_value
 
 #define plt_log2_u32	 rte_log2_u32
 #define plt_cpu_to_be_16 rte_cpu_to_be_16
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
  2022-09-05 13:31   ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
                     ` (27 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the cn10ka A1 pass. It is the next
minor pass after A0.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index 626de60fb0..bdbd9a96b2 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -60,6 +60,7 @@ static const struct model_db {
 	char name[ROC_MODEL_STR_LEN_MAX];
 } model_db[] = {
 	{VENDOR_ARM, PART_106xx, 0, 0, ROC_MODEL_CN106xx_A0, "cn10ka_a0"},
+	{VENDOR_ARM, PART_106xx, 0, 1, ROC_MODEL_CN106xx_A1, "cn10ka_a1"},
 	{VENDOR_ARM, PART_105xx, 0, 0, ROC_MODEL_CNF105xx_A0, "cnf10ka_a0"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 37c8a4744d..d231d44b60 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -25,6 +25,7 @@ struct roc_model {
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
 #define ROC_MODEL_CN103xx_A0   BIT_ULL(23)
+#define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -48,7 +49,7 @@ struct roc_model {
 	 ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CNF95xxN_A1 |                       \
 	 ROC_MODEL_CNF95xxN_B0)
 
-#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0)
+#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0)
 #define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
@@ -191,6 +192,12 @@ roc_model_is_cn10ka_a0(void)
 	return roc_model->flag & ROC_MODEL_CN106xx_A0;
 }
 
+static inline uint64_t
+roc_model_is_cn10ka_a1(void)
+{
+	return roc_model->flag & ROC_MODEL_CN106xx_A1;
+}
+
 static inline uint64_t
 roc_model_is_cnf10ka_a0(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
  2022-09-05 13:31   ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
                     ` (26 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

Update the CPT inbound inline IPsec configuration mailbox
to provide the opcode and CPT credit from the VF.
This patch also adds a mailbox for reading the inbound inline
IPsec configuration.

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c   | 15 +++++++++++++++
 drivers/common/cnxk/roc_cpt.h   |  2 ++
 drivers/common/cnxk/roc_mbox.h  | 12 +++++++++---
 drivers/common/cnxk/version.map |  1 +
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index f1be6a3401..d607bde3c4 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -260,6 +260,21 @@ roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
 	return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
 }
 
+int
+roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
+				  struct nix_inline_ipsec_cfg *inb_cfg)
+{
+	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
+	struct dev *dev = &cpt->dev;
+	struct msg_req *req;
+
+	req = mbox_alloc_msg_nix_read_inline_ipsec_cfg(dev->mbox);
+	if (req == NULL)
+		return -EIO;
+
+	return mbox_process_msg(dev->mbox, (void *)&inb_cfg);
+}
+
 int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 			     uint16_t param2)
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index a3a65f1e94..4e3a078a90 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -158,6 +158,8 @@ int __roc_api roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr,
 int __roc_api roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr);
 int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 				       struct roc_nix *nix);
+int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
+	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
 					   uint16_t param1, uint16_t param2);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 965c704322..912de1121b 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -263,7 +263,9 @@ struct mbox_msghdr {
 	  nix_bp_cfg_rsp)                                                      \
 	M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req,      \
 	  msg_rsp)                                                             \
-	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
+	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
+	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
+	  msg_req, nix_inline_ipsec_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1161,7 +1163,9 @@ struct nix_inline_ipsec_cfg {
 	uint32_t __io cpt_credit;
 	struct {
 		uint8_t __io egrp;
-		uint8_t __io opcode;
+		uint16_t __io opcode;
+		uint16_t __io param1;
+		uint16_t __io param2;
 	} gen_cfg;
 	struct {
 		uint16_t __io cpt_pf_func;
@@ -1465,7 +1469,9 @@ struct cpt_rx_inline_lf_cfg_msg {
 	uint16_t __io sso_pf_func;
 	uint16_t __io param1;
 	uint16_t __io param2;
-	uint16_t __io reserved;
+	uint16_t __io opcode;
+	uint32_t __io credit;
+	uint32_t __io reserved;
 };
 
 enum cpt_eng_type {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 019f53173f..a2d99e1f4a 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -65,6 +65,7 @@ INTERNAL {
 	roc_cpt_dev_init;
 	roc_cpt_eng_grp_add;
 	roc_cpt_inline_ipsec_cfg;
+	roc_cpt_inline_ipsec_inb_cfg_read;
 	roc_cpt_inline_ipsec_inb_cfg;
 	roc_cpt_iq_disable;
 	roc_cpt_iq_enable;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
                     ` (25 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Fix missing fc wait for outbound path in vector mode.
Currently only poll mode has it.

Fixes: 358d02d20a2f ("net/cnxk: support flow control for outbound inline")
Cc: ndabilpuram@marvell.com

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 8056510589..07c88a974e 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1049,9 +1049,13 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -1199,9 +1203,13 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -2753,7 +2761,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		uint16_t sec_pkts = (c_lnum << 1) + c_loff;
+
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
                     ` (24 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Limit the meta aura workaround to CN10K A0.
Also mark other NIX and inline related erratas as applicable
to CN10K A1.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h  |  7 +++++++
 drivers/common/cnxk/roc_nix_inl.c | 10 ++++++----
 drivers/net/cnxk/cnxk_ethdev.c    |  3 ++-
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index f04829736b..8dc372f956 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -80,6 +80,13 @@ roc_errata_nix_has_perf_issue_on_stats_update(void)
 /* Errata IPBUCPT-38726, IPBUCPT-38727 */
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
+{
+	return roc_model_is_cn10ka_a0() || roc_model_is_cn10ka_a1();
+}
+
+/* IPBUNIXRX-40400 */
+static inline bool
+roc_errata_nix_no_meta_aura(void)
 {
 	return roc_model_is_cn10ka_a0();
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 7da89382e9..603551bf83 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -627,18 +627,18 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 	inl_rq->first_skip = rq->first_skip;
 	inl_rq->later_skip = rq->later_skip;
 	inl_rq->lpb_size = rq->lpb_size;
-	inl_rq->lpb_drop_ena = true;
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
-	inl_rq->spb_drop_ena = !!rq->spb_ena;
 
-	if (!roc_model_is_cn9k()) {
+	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->lpb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
@@ -653,12 +653,14 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 		roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
 	}
 
-	if (inl_rq->spb_ena) {
+	if (roc_errata_nix_no_meta_aura() && inl_rq->spb_ena) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->spb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index cfcc4df916..d90baabc4d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -617,7 +617,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->first_skip = first_skip;
 	rq->later_skip = sizeof(struct rte_mbuf);
 	rq->lpb_size = mp->elt_size;
-	rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
+	if (roc_errata_nix_no_meta_aura())
+		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
 	if (roc_nix_inl_inb_is_enabled(nix))
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
                     ` (23 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Similar to other RQs, delay enabling the inline device RQ until device
start, to avoid traffic reception while the device is stopped.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.h    |  2 --
 drivers/common/cnxk/roc_nix_inl.c | 34 ++++++++++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl.h |  5 ++++-
 drivers/common/cnxk/version.map   |  7 ++++---
 drivers/net/cnxk/cnxk_ethdev.c    | 14 ++++++++++++-
 5 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 7e0beed495..16793c2828 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,6 +17,4 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 
-uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
-
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 603551bf83..c621867e54 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -245,6 +245,9 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	struct roc_cpt *roc_cpt;
 	struct roc_cpt_rxc_time_cfg cfg;
 
+	if (!idev)
+		return -EFAULT;
+
 	PLT_SET_USED(max_frags);
 	if (idev == NULL)
 		return -ENOTSUP;
@@ -587,7 +590,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 }
 
 int
-roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
+roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
@@ -688,9 +691,9 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 
 	/* Prepare and send RQ init mbox */
 	if (roc_model_is_cn9k())
-		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	else
-		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	if (rc) {
 		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
 		return rc;
@@ -755,6 +758,31 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	return rc;
 }
 
+int
+roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	int rc;
+
+	if (!idev)
+		return -EFAULT;
+
+	if (nix->inb_inl_dev) {
+		if (!inl_rq || !idev->nix_inl_dev)
+			return -EFAULT;
+
+		inl_dev = idev->nix_inl_dev;
+
+		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 void
 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index c7b1817d7b..702ec01384 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -165,7 +165,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
-int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
+int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
 struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
@@ -175,6 +175,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 					   uint16_t max_frags);
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
+int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -189,6 +190,8 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 					void *args);
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
+uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
 	ROC_NIX_INL_SA_OP_FLUSH,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a2d99e1f4a..6d43e37d1e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -90,7 +90,6 @@ INTERNAL {
 	roc_hash_sha512_gen;
 	roc_idev_cpt_get;
 	roc_idev_cpt_set;
-	roc_nix_inl_outb_ring_base_get;
 	roc_idev_lmt_base_addr_get;
 	roc_idev_npa_maxpools_get;
 	roc_idev_npa_maxpools_set;
@@ -137,11 +136,13 @@ INTERNAL {
 	roc_nix_get_vwqe_interval;
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
+	roc_nix_inl_ctx_write;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
 	roc_nix_inl_dev_is_probed;
 	roc_nix_inl_dev_lock;
+	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_dev_rq;
 	roc_nix_inl_dev_rq_get;
 	roc_nix_inl_dev_rq_put;
@@ -163,11 +164,11 @@ INTERNAL {
 	roc_nix_inl_outb_sa_base_get;
 	roc_nix_inl_outb_sso_pffunc_get;
 	roc_nix_inl_outb_is_enabled;
+	roc_nix_inl_outb_ring_base_get;
 	roc_nix_inl_outb_soft_exp_poll_switch;
+	roc_nix_inl_rq_ena_dis;
 	roc_nix_inl_sa_sync;
 	roc_nix_inl_ts_pkind_set;
-	roc_nix_inl_ctx_write;
-	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_outb_cpt_lfs_dump;
 	roc_nix_cpt_ctx_cache_sync;
 	roc_nix_is_lbk;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index d90baabc4d..80ab3cfedd 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -660,7 +660,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
 
 		/* Setup rq reference for inline dev if present */
-		rc = roc_nix_inl_dev_rq_get(rq);
+		rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
 		if (rc)
 			goto free_mem;
 	}
@@ -1482,6 +1482,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 
 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
 
+	/* Stop inline device RQ first */
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		roc_nix_inl_rq_ena_dis(&dev->nix, false);
+
 	/* Stop rx queues and free up pkts pending */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rc = dev_ops->rx_queue_stop(eth_dev, i);
@@ -1527,6 +1531,14 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
+		if (rc) {
+			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Start tx queues  */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 09/31] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
                     ` (22 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella, Ashwin Sekhar T K, Pavan Nikhilesh
  Cc: jerinj, dev

Reserve aura id 0 on cn10k and provide a mechanism to
specifically allocate it and free it via roc_npa_*
APIs.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_dpi.c           |   2 +-
 drivers/common/cnxk/roc_nix_queue.c     |   2 +-
 drivers/common/cnxk/roc_npa.c           | 100 +++++++++++++++++++-----
 drivers/common/cnxk/roc_npa.h           |   6 +-
 drivers/common/cnxk/roc_npa_priv.h      |   1 +
 drivers/common/cnxk/roc_sso.c           |   2 +-
 drivers/common/cnxk/version.map         |   1 +
 drivers/mempool/cnxk/cnxk_mempool_ops.c |   7 +-
 8 files changed, 97 insertions(+), 24 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index 23b2cc41a4..93c8318a3d 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -75,7 +75,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi)
 
 	memset(&aura, 0, sizeof(aura));
 	rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,
-				 DPI_CMD_QUEUE_BUFS, &aura, &pool);
+				 DPI_CMD_QUEUE_BUFS, &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create NPA pool, err %d\n", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 692b13415a..70b4516eca 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -713,7 +713,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
-				 &pool);
+				 &pool, 0);
 	if (rc)
 		goto fail;
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 1e60f443f0..760a2315b2 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -260,16 +260,60 @@ bitmap_ctzll(uint64_t slab)
 	return __builtin_ctzll(slab);
 }
 
+static int
+find_free_aura(struct npa_lf *lf, uint32_t flags)
+{
+	struct plt_bitmap *bmp = lf->npa_bmp;
+	uint64_t aura0_state = 0;
+	uint64_t slab;
+	uint32_t pos;
+	int idx = -1;
+	int rc;
+
+	if (flags & ROC_NPA_ZERO_AURA_F) {
+		/* Only look for zero aura */
+		if (plt_bitmap_get(bmp, 0))
+			return 0;
+		plt_err("Zero aura already in use");
+		return -1;
+	}
+
+	if (lf->zero_aura_rsvd) {
+		/* Save and clear zero aura bit if needed */
+		aura0_state = plt_bitmap_get(bmp, 0);
+		if (aura0_state)
+			plt_bitmap_clear(bmp, 0);
+	}
+
+	pos = 0;
+	slab = 0;
+	/* Scan from the beginning */
+	plt_bitmap_scan_init(bmp);
+	/* Scan bitmap to get the free pool */
+	rc = plt_bitmap_scan(bmp, &pos, &slab);
+	/* Empty bitmap */
+	if (rc == 0) {
+		plt_err("Aura's exhausted");
+		goto empty;
+	}
+
+	idx = pos + bitmap_ctzll(slab);
+empty:
+	if (lf->zero_aura_rsvd && aura0_state)
+		plt_bitmap_set(bmp, 0);
+
+	return idx;
+}
+
 static int
 npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 			 const uint32_t block_count, struct npa_aura_s *aura,
-			 struct npa_pool_s *pool, uint64_t *aura_handle)
+			 struct npa_pool_s *pool, uint64_t *aura_handle,
+			 uint32_t flags)
 {
 	int rc, aura_id, pool_id, stack_size, alloc_size;
 	char name[PLT_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
-	uint64_t slab;
-	uint32_t pos;
 
 	/* Sanity check */
 	if (!lf || !block_size || !block_count || !pool || !aura ||
@@ -281,20 +325,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	    block_size > ROC_NPA_MAX_BLOCK_SZ)
 		return NPA_ERR_INVALID_BLOCK_SZ;
 
-	pos = 0;
-	slab = 0;
-	/* Scan from the beginning */
-	plt_bitmap_scan_init(lf->npa_bmp);
-	/* Scan bitmap to get the free pool */
-	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
-	/* Empty bitmap */
-	if (rc == 0) {
-		plt_err("Mempools exhausted");
-		return NPA_ERR_AURA_ID_ALLOC;
-	}
-
 	/* Get aura_id from resource bitmap */
-	aura_id = pos + bitmap_ctzll(slab);
+	aura_id = find_free_aura(lf, flags);
+	if (aura_id < 0)
+		return NPA_ERR_AURA_ID_ALLOC;
+
 	/* Mark pool as reserved */
 	plt_bitmap_clear(lf->npa_bmp, aura_id);
 
@@ -374,7 +409,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 int
 roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		    uint32_t block_count, struct npa_aura_s *aura,
-		    struct npa_pool_s *pool)
+		    struct npa_pool_s *pool, uint32_t flags)
 {
 	struct npa_aura_s defaura;
 	struct npa_pool_s defpool;
@@ -394,6 +429,11 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		goto error;
 	}
 
+	if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+		rc = NPA_ERR_ALLOC;
+		goto error;
+	}
+
 	if (aura == NULL) {
 		memset(&defaura, 0, sizeof(struct npa_aura_s));
 		aura = &defaura;
@@ -406,7 +446,7 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 	}
 
 	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
-				      aura_handle);
+				      aura_handle, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
@@ -522,6 +562,26 @@ roc_npa_pool_range_update_check(uint64_t aura_handle)
 	return 0;
 }
 
+uint64_t
+roc_npa_zero_aura_handle(void)
+{
+	struct idev_cfg *idev;
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+	idev = idev_get_cfg();
+	if (idev == NULL)
+		return NPA_ERR_ALLOC;
+
+	/* Return aura handle only if reserved */
+	if (lf->zero_aura_rsvd)
+		return roc_npa_aura_handle_gen(0, lf->base);
+	return 0;
+}
+
 static inline int
 npa_attach(struct mbox *mbox)
 {
@@ -672,6 +732,10 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 	for (i = 0; i < nr_pools; i++)
 		plt_bitmap_set(lf->npa_bmp, i);
 
+	/* Reserve zero aura for all models other than CN9K */
+	if (!roc_model_is_cn9k())
+		lf->zero_aura_rsvd = true;
+
 	/* Allocate memory for qint context */
 	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
 	if (lf->npa_qint_mem == NULL) {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 59d13d88a1..69129cb4cc 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -711,10 +711,13 @@ struct roc_npa {
 int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
 int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 
+/* Flags to pool create */
+#define ROC_NPA_ZERO_AURA_F BIT(0)
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
-				  struct npa_pool_s *pool);
+				  struct npa_pool_s *pool, uint32_t flags);
 int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
 					uint16_t aura_limit);
 int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
@@ -722,6 +725,7 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
+uint64_t __roc_api roc_npa_zero_aura_handle(void);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index 5a02a61e00..de3d5448ba 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -32,6 +32,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
+	bool zero_aura_rsvd;
 };
 
 struct npa_qint {
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 126a9cba99..4bee5a97e1 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -473,7 +473,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 	aura.fc_addr = (uint64_t)xaq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
-				 &aura, &pool);
+				 &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create XAQ pool");
 		goto npa_fail;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6d43e37d1e..6c05e893e3 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -318,6 +318,7 @@ INTERNAL {
 	roc_npa_pool_destroy;
 	roc_npa_pool_op_pc_reset;
 	roc_npa_pool_range_update_check;
+	roc_npa_zero_aura_handle;
 	roc_npc_fini;
 	roc_npc_flow_create;
 	roc_npc_flow_destroy;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index c7b75f026d..a0b94bb95c 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,10 +72,10 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 int
 cnxk_mempool_alloc(struct rte_mempool *mp)
 {
+	uint32_t block_count, flags = 0;
 	uint64_t aura_handle = 0;
 	struct npa_aura_s aura;
 	struct npa_pool_s pool;
-	uint32_t block_count;
 	size_t block_size;
 	int rc = -ERANGE;
 
@@ -100,8 +100,11 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
 	if (mp->pool_config != NULL)
 		memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
 
+	if (aura.ena && aura.pool_addr == 0)
+		flags = ROC_NPA_ZERO_AURA_F;
+
 	rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
-				 &pool);
+				 &pool, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 09/31] common/cnxk: add support to set NPA buf type
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
                     ` (21 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to set/get the per-aura buf type with reference
counts, and to get the sum of all aura limits matching a
given buf type mask and value.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/meson.build    |  1 +
 drivers/common/cnxk/roc_npa.c      | 11 ++++
 drivers/common/cnxk/roc_npa.h      | 22 +++++++
 drivers/common/cnxk/roc_npa_priv.h |  8 ++-
 drivers/common/cnxk/roc_npa_type.c | 99 ++++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map    |  3 +
 6 files changed, 143 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/cnxk/roc_npa_type.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 6f808271d1..127fcbcdc5 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -51,6 +51,7 @@ sources = files(
         'roc_npa.c',
         'roc_npa_debug.c',
         'roc_npa_irq.c',
+        'roc_npa_type.c',
         'roc_npc.c',
         'roc_npc_mcam.c',
         'roc_npc_mcam_dump.c',
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 760a2315b2..ee42434c38 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -499,6 +499,7 @@ npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
 	pool_id = aura_id;
 	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
 	rc |= npa_stack_dma_free(lf, name, pool_id);
+	memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
 
 	plt_bitmap_set(lf->npa_bmp, aura_id);
 
@@ -750,6 +751,13 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 		goto qint_free;
 	}
 
+	/* Allocate per-aura attribute */
+	lf->aura_attr = plt_zmalloc(sizeof(struct npa_aura_attr) * nr_pools, 0);
+	if (lf->aura_attr == NULL) {
+		rc = NPA_ERR_PARAM;
+		goto lim_free;
+	}
+
 	/* Init aura start & end limits */
 	for (i = 0; i < nr_pools; i++) {
 		lf->aura_lim[i].ptr_start = UINT64_MAX;
@@ -758,6 +766,8 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 
 	return 0;
 
+lim_free:
+	plt_free(lf->aura_lim);
 qint_free:
 	plt_free(lf->npa_qint_mem);
 bmap_free:
@@ -780,6 +790,7 @@ npa_dev_fini(struct npa_lf *lf)
 	plt_free(lf->npa_qint_mem);
 	plt_bitmap_free(lf->npa_bmp);
 	plt_free(lf->npa_bmp_mem);
+	plt_free(lf->aura_attr);
 
 	return npa_lf_free(lf->mbox);
 }
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 69129cb4cc..fed1942404 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -714,6 +714,25 @@ int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 /* Flags to pool create */
 #define ROC_NPA_ZERO_AURA_F BIT(0)
 
+/* Enumerations */
+enum roc_npa_buf_type {
+	/* Aura used for normal pkts */
+	ROC_NPA_BUF_TYPE_PACKET = 0,
+	/* Aura used for ipsec pkts */
+	ROC_NPA_BUF_TYPE_PACKET_IPSEC,
+	/* Aura used as vwqe for normal pkts */
+	ROC_NPA_BUF_TYPE_VWQE,
+	/* Aura used as vwqe for ipsec pkts */
+	ROC_NPA_BUF_TYPE_VWQE_IPSEC,
+	/* Aura used as SQB for SQ */
+	ROC_NPA_BUF_TYPE_SQB,
+	/* Aura used for general buffer */
+	ROC_NPA_BUF_TYPE_BUF,
+	/* Aura used for timeout pool */
+	ROC_NPA_BUF_TYPE_TIMEOUT,
+	ROC_NPA_BUF_TYPE_END,
+};
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
@@ -726,6 +745,9 @@ void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
 uint64_t __roc_api roc_npa_zero_aura_handle(void);
+int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
+uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
+uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index de3d5448ba..d2118cc4fb 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -18,6 +18,7 @@ enum npa_error_status {
 
 struct npa_lf {
 	struct plt_intr_handle *intr_handle;
+	struct npa_aura_attr *aura_attr;
 	struct npa_aura_lim *aura_lim;
 	struct plt_pci_device *pci_dev;
 	struct plt_bitmap *npa_bmp;
@@ -25,6 +26,7 @@ struct npa_lf {
 	uint32_t stack_pg_ptrs;
 	uint32_t stack_pg_bytes;
 	uint16_t npa_msixoff;
+	bool zero_aura_rsvd;
 	void *npa_qint_mem;
 	void *npa_bmp_mem;
 	uint32_t nr_pools;
@@ -32,7 +34,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
-	bool zero_aura_rsvd;
+
 };
 
 struct npa_qint {
@@ -45,6 +47,10 @@ struct npa_aura_lim {
 	uint64_t ptr_end;
 };
 
+struct npa_aura_attr {
+	int buf_type[ROC_NPA_BUF_TYPE_END];
+};
+
 struct dev;
 
 static inline struct npa *
diff --git a/drivers/common/cnxk/roc_npa_type.c b/drivers/common/cnxk/roc_npa_type.c
new file mode 100644
index 0000000000..ed90138944
--- /dev/null
+++ b/drivers/common/cnxk/roc_npa_type.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+int
+roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int count)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools)
+		return NPA_ERR_PARAM;
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot set buf type on unused aura");
+		return NPA_ERR_PARAM;
+	}
+
+	if (type >= ROC_NPA_BUF_TYPE_END || (lf->aura_attr[aura_id].buf_type[type] + count < 0)) {
+		plt_err("Pool buf type invalid");
+		return NPA_ERR_PARAM;
+	}
+
+	lf->aura_attr[aura_id].buf_type[type] += count;
+	plt_wmb();
+	return 0;
+}
+
+uint64_t
+roc_npa_buf_type_mask(uint64_t aura_handle)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	uint64_t type_mask = 0;
+	struct npa_lf *lf;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools) {
+		plt_err("Invalid aura id or lf");
+		return 0;
+	}
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot get buf_type on unused aura");
+		return 0;
+	}
+
+	for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+		if (lf->aura_attr[aura_id].buf_type[type])
+			type_mask |= BIT_ULL(type);
+	}
+
+	return type_mask;
+}
+
+uint64_t
+roc_npa_buf_type_limit_get(uint64_t type_mask)
+{
+	uint64_t wdata, reg;
+	uint64_t limit = 0;
+	struct npa_lf *lf;
+	uint64_t aura_id;
+	int64_t *addr;
+	uint64_t val;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	for (aura_id = 0; aura_id < lf->nr_pools; aura_id++) {
+		if (plt_bitmap_get(lf->npa_bmp, aura_id))
+			continue;
+
+		/* Find aura's matching the buf_types requested */
+		if (type_mask != 0) {
+			val = 0;
+			for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+				if (lf->aura_attr[aura_id].buf_type[type] != 0)
+					val |= BIT_ULL(type);
+			}
+			if ((val & type_mask) == 0)
+				continue;
+		}
+
+		wdata = aura_id << 44;
+		addr = (int64_t *)(lf->base + NPA_LF_AURA_OP_LIMIT);
+		reg = roc_atomic64_add_nosync(wdata, addr);
+
+		if (!(reg & BIT_ULL(42)))
+			limit += (reg & ROC_AURA_OP_LIMIT_MASK);
+	}
+
+	return limit;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6c05e893e3..6f3de2ab59 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -306,6 +306,9 @@ INTERNAL {
 	roc_nix_vlan_mcam_entry_write;
 	roc_nix_vlan_strip_vtag_ena_dis;
 	roc_nix_vlan_tpid_set;
+	roc_npa_buf_type_mask;
+	roc_npa_buf_type_limit_get;
+	roc_npa_buf_type_update;
 	roc_npa_aura_drop_set;
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_set;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 09/31] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
                     ` (20 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Update attributes of pools used by NIX so that we
can later identify which mempools are packet pools
and which are used for an Inline IPsec enabled ethdev.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_queue.c | 112 +++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 70b4516eca..98b9fb45f5 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -140,6 +140,96 @@ roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
 	return sso_enable ? true : false;
 }
 
+static int
+nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
+{
+	struct roc_nix *roc_nix = rq->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
+	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
+	struct mbox *mbox = nix->dev.mbox;
+	uint64_t aura_base;
+	int rc, count;
+
+	count = set ? 1 : -1;
+	/* For buf type set, use info from RQ context */
+	if (set) {
+		lpb_aura = rq->aura_handle;
+		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
+		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
+		goto skip_ctx_read;
+	}
+
+	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_rsp *rsp;
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+	} else {
+		struct nix_cn10k_aq_enq_rsp *rsp;
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+		if (rsp->rq.vwqe_ena)
+			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);
+	}
+
+skip_ctx_read:
+	/* Update attributes for LPB aura */
+	if (inl_inb_ena)
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+	else
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+
+	/* Update attributes for SPB aura */
+	if (spb_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+		else
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+	}
+
+	/* Update attributes for VWQE aura */
+	if (vwqe_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
+		else
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
+	}
+
+	return 0;
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		bool cfg, bool ena)
@@ -292,7 +382,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
-			aq->rq.wqe_aura = rq->vwqe_aura_handle;
+			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
 		}
 	} else {
 		/* CQ mode */
@@ -463,6 +553,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura buf type to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -481,6 +574,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rq->qid >= nix->nb_rx_queues)
 		return NIX_ERR_QUEUE_INVALID_RANGE;
 
+	/* Clear attributes for existing aura's */
+	nix_rq_aura_buf_type_update(rq, false);
+
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
@@ -495,14 +591,25 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
+	int rc;
+
 	/* Disabling RQ is sufficient */
-	return roc_nix_rq_ena_dis(rq, false);
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc)
+		return rc;
+
+	/* Update aura attribute to indicate its use for */
+	nix_rq_aura_buf_type_update(rq, false);
+	return 0;
 }
 
 int
@@ -717,6 +824,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	if (rc)
 		goto fail;
 
+	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
 	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
                     ` (19 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to create a zero aura for inline inbound meta pkts when the
platform supports it. Aura zero will hold as many buffers as all the
available pkt pools combined, with data space to accommodate 384B in the
best case, to store meta packets coming from Inline IPsec.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.c         |  10 ++
 drivers/common/cnxk/roc_idev.h         |   1 +
 drivers/common/cnxk/roc_idev_priv.h    |   9 ++
 drivers/common/cnxk/roc_nix.h          |   1 +
 drivers/common/cnxk/roc_nix_inl.c      | 211 +++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h      |   8 +
 drivers/common/cnxk/roc_nix_inl_dev.c  |   2 +
 drivers/common/cnxk/roc_nix_inl_priv.h |   4 +
 drivers/common/cnxk/roc_nix_priv.h     |   1 +
 drivers/common/cnxk/roc_nix_queue.c    |  19 +++
 drivers/common/cnxk/version.map        |   4 +
 11 files changed, 270 insertions(+)

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index a08c7ce8fd..4d2eff93ce 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -241,3 +241,13 @@ idev_sso_set(struct roc_sso *sso)
 	if (idev != NULL)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
+
+uint64_t
+roc_idev_nix_inl_meta_aura_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->inl_cfg.meta_aura;
+	return 0;
+}
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 16793c2828..926aac0634 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -16,5 +16,6 @@ struct roc_cpt *__roc_api roc_idev_cpt_get(void);
 void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
+uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 46eebffcbb..315cc6f52c 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -10,6 +10,14 @@ struct npa_lf;
 struct roc_bphy;
 struct roc_cpt;
 struct nix_inl_dev;
+
+struct idev_nix_inl_cfg {
+	uint64_t meta_aura;
+	uint32_t nb_bufs;
+	uint32_t buf_sz;
+	uint32_t refs;
+};
+
 struct idev_cfg {
 	uint16_t sso_pf_func;
 	uint16_t npa_pf_func;
@@ -23,6 +31,7 @@ struct idev_cfg {
 	struct roc_cpt *cpt;
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
+	struct idev_nix_inl_cfg inl_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 };
 
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index c9aaedc915..77e4d2919b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -321,6 +321,7 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
+	uint64_t meta_aura_handle;
 	uint16_t inl_dev_refs;
 };
 
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index c621867e54..507a15315a 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -6,6 +6,7 @@
 #include "roc_priv.h"
 
 uint32_t soft_exp_consumer_cnt;
+roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
@@ -18,6 +19,155 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
 
+static int
+nix_inl_meta_aura_destroy(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	/* Destroy existing Meta aura */
+	if (inl_cfg->meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
+		limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+		inl_cfg->meta_aura = 0;
+		inl_cfg->buf_sz = 0;
+		inl_cfg->nb_bufs = 0;
+		inl_cfg->refs = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	uint32_t nb_bufs, buf_sz;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Allocate meta aura */
+	rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	inl_cfg->buf_sz = buf_sz;
+	inl_cfg->nb_bufs = nb_bufs;
+	return 0;
+}
+
+int
+roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint32_t actual, expected;
+	uint64_t mask, type_mask;
+	int rc;
+
+	if (!idev || !meta_pool_cb)
+		return -EFAULT;
+	inl_cfg = &idev->inl_cfg;
+
+	/* Create meta aura if not present */
+	if (!inl_cfg->meta_aura) {
+		rc = nix_inl_meta_aura_create(idev, rq->first_skip);
+		if (rc)
+			return rc;
+	}
+
+	/* Validate if we have enough meta buffers */
+	mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	expected = roc_npa_buf_type_limit_get(mask);
+	actual = inl_cfg->nb_bufs;
+
+	if (actual < expected) {
+		plt_err("Insufficient buffers in meta aura %u < %u (expected)",
+			actual, expected);
+		return -EIO;
+	}
+
+	/* Validate if we have enough space for meta buffer */
+	if (rq->first_skip + NIX_INL_META_SIZE > inl_cfg->buf_sz) {
+		plt_err("Meta buffer size %u not sufficient to meet RQ first skip %u",
+			inl_cfg->buf_sz, rq->first_skip);
+		return -EIO;
+	}
+
+	/* Validate if we have enough VWQE buffers */
+	if (rq->vwqe_ena) {
+		actual = roc_npa_aura_op_limit_get(rq->vwqe_aura_handle);
+
+		type_mask = roc_npa_buf_type_mask(rq->vwqe_aura_handle);
+		if (type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE_IPSEC) &&
+		    type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE)) {
+			/* A VWQE aura shared b/w Inline enabled and non Inline
+			 * enabled ports needs enough buffers to store all the
+			 * packet buffers, one per vwqe.
+			 */
+			mask = (BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC) |
+				BIT_ULL(ROC_NPA_BUF_TYPE_PACKET));
+			expected = roc_npa_buf_type_limit_get(mask);
+
+			if (actual < expected) {
+				plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
+					"ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		} else {
+			/* A VWQE aura not shared b/w Inline and non-Inline ports has
+			 * the relaxed requirement of matching only the meta buffers.
+			 */
+			expected = inl_cfg->nb_bufs;
+
+			if (actual < expected) {
+				plt_err("VWQE aura not shared b/w Inline inbound and non-Inline "
+					"ports needs vwqe bufs(%u) minimum of all meta bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 {
@@ -310,6 +460,10 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		nix->need_meta_aura = true;
+		idev->inl_cfg.refs++;
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -317,12 +471,22 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 int
 roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
+	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 
 	if (!nix->inl_inb_ena)
 		return 0;
 
+	if (!idev)
+		return -EFAULT;
+
 	nix->inl_inb_ena = false;
+	if (nix->need_meta_aura) {
+		nix->need_meta_aura = false;
+		idev->inl_cfg.refs--;
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy();
+	}
 
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
@@ -592,6 +756,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 int
 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
+	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
@@ -603,6 +768,10 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	if (idev == NULL)
 		return 0;
 
+	/* Update meta aura handle in RQ */
+	if (nix->need_meta_aura)
+		rq->meta_aura_handle = roc_npa_zero_aura_handle();
+
 	inl_dev = idev->nix_inl_dev;
 	/* Nothing to do if no inline device */
 	if (!inl_dev)
@@ -705,6 +874,13 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 		return rc;
 	}
 
+	/* Check meta aura */
+	if (enable && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	inl_rq->inl_dev_refs++;
 	rq->inl_dev_refs = 1;
 	return 0;
@@ -724,6 +900,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	if (idev == NULL)
 		return 0;
 
+	rq->meta_aura_handle = 0;
 	if (!rq->inl_dev_refs)
 		return 0;
 
@@ -779,6 +956,9 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
 		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
 		if (rc)
 			return rc;
+
+		if (enable && nix->need_meta_aura)
+			return roc_nix_inl_meta_aura_check(inl_rq);
 	}
 	return 0;
 }
@@ -792,6 +972,31 @@ roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 	nix->inb_inl_dev = use_inl_dev;
 }
 
+void
+roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (!idev)
+		return;
+	/* Need to set here for cases when inbound SA table is
+	 * managed outside RoC.
+	 */
+	nix->inl_inb_ena = ena;
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		if (ena) {
+			nix->need_meta_aura = true;
+			idev->inl_cfg.refs++;
+		} else if (nix->need_meta_aura) {
+			nix->need_meta_aura = false;
+			idev->inl_cfg.refs--;
+			if (!idev->inl_cfg.refs)
+				nix_inl_meta_aura_destroy();
+		}
+	}
+}
+
 int
 roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
 {
@@ -1128,3 +1333,9 @@ roc_nix_inl_dev_unlock(void)
 	if (idev != NULL)
 		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
 }
+
+void
+roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
+{
+	meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 702ec01384..9911a48b2d 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
 
+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
+					  bool destroy);
+
 struct roc_nix_inl_dev {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -135,6 +138,8 @@ struct roc_nix_inl_dev {
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
 	bool set_soft_exp_poll;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 	/* End of input parameters */
 
 #define ROC_NIX_INL_MEM_SZ (1280)
@@ -165,6 +170,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
+void __roc_api roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena);
 int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
@@ -176,6 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
 int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -191,6 +198,7 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 3a96498d64..1e9b2b95d7 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -841,6 +841,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
 	inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
 	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
+	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
+	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
 
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index a775efc637..ccd2adf982 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,6 +6,8 @@
 #include <pthread.h>
 #include <sys/types.h>
 
+#define NIX_INL_META_SIZE 384u
+
 struct nix_inl_dev;
 struct nix_inl_qint {
 	struct nix_inl_dev *inl_dev;
@@ -86,6 +88,8 @@ struct nix_inl_dev {
 	bool attach_cptlf;
 	uint16_t wqe_skip;
 	bool ts_ena;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 };
 
 int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a3d4ddf5d5..a253f412de 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -202,6 +202,7 @@ struct nix {
 	uint16_t nb_cpt_lf;
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
+	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
 
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 98b9fb45f5..b197de0a77 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -89,7 +89,12 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 
 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
 	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
+	if (rc)
+		return rc;
 
+	/* Check for meta aura if RQ is enabled */
+	if (enable && nix->need_meta_aura)
+		rc = roc_nix_inl_meta_aura_check(rq);
 	return rc;
 }
 
@@ -556,6 +561,13 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura buf type to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -594,6 +606,13 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura attribute to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6f3de2ab59..276fec3660 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -95,6 +95,7 @@ INTERNAL {
 	roc_idev_npa_maxpools_set;
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
+	roc_idev_nix_inl_meta_aura_get;
 	roc_model;
 	roc_se_auth_key_set;
 	roc_se_ciph_key_set;
@@ -156,7 +157,10 @@ INTERNAL {
 	roc_nix_inl_inb_sa_sz;
 	roc_nix_inl_inb_tag_update;
 	roc_nix_inl_inb_fini;
+	roc_nix_inl_inb_set;
 	roc_nix_inb_is_with_inl_dev;
+	roc_nix_inl_meta_aura_check;
+	roc_nix_inl_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
                     ` (18 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Add support for zero aura for inline meta pkts and register
callback to ROC to create meta pool via mempool. Also
add devargs to override meta buffer count and size.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c      |  8 +-
 drivers/event/cnxk/cn10k_worker.h        | 32 ++++----
 drivers/event/cnxk/cnxk_eventdev.h       |  1 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c          |  8 +-
 drivers/net/cnxk/cn10k_ethdev.h          |  2 +-
 drivers/net/cnxk/cn10k_rx.h              | 35 +++++----
 drivers/net/cnxk/cnxk_ethdev.c           |  3 +
 drivers/net/cnxk/cnxk_ethdev.h           |  2 +
 drivers/net/cnxk/cnxk_ethdev_sec.c       | 97 +++++++++++++++++++++++-
 10 files changed, 154 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index fee01713b4..1774455b4c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,7 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,6 +703,8 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
 		ws->tstamp = dev->tstamp;
+		if (meta_aura)
+			ws->meta_aura = meta_aura;
 	}
 }
 
@@ -713,6 +715,7 @@ cn10k_sso_rx_adapter_queue_add(
 	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct cn10k_eth_rxq *rxq;
+	uint64_t meta_aura;
 	void *lookup_mem;
 	int rc;
 
@@ -726,7 +729,8 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+	meta_aura = rxq->meta_aura;
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index db56d96404..47ce423da2 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -127,12 +127,14 @@ cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
 }
 
 static __rte_always_inline void
-cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
-		   void *lookup_mem, void *tstamp, uintptr_t lbase)
+cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn10k_sso_hws *ws)
 {
 	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+	struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+	void *lookup_mem = ws->lookup_mem;
+	uintptr_t lbase = ws->lmt_base;
 	struct rte_event_vector *vec;
-	uint64_t aura_handle, laddr;
+	uint64_t meta_aura, laddr;
 	uint16_t nb_mbufs, non_vec;
 	uint16_t lmt_id, d_off;
 	struct rte_mbuf **wqe;
@@ -153,25 +155,31 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
 		mbuf_init |= 8;
 
+	meta_aura = ws->meta_aura;
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
-					      flags | NIX_RX_VWQE_F, lookup_mem,
-					      tstamp, lbase);
+					      flags | NIX_RX_VWQE_F,
+					      lookup_mem, tstamp,
+					      lbase, meta_aura);
 	wqe += nb_mbufs;
 	non_vec = vec->nb_elem - nb_mbufs;
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+		uint64_t sg_w1;
+
 		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
 					   sizeof(struct rte_mbuf));
 		/* Pick first mbuf's aura handle assuming all
 		 * mbufs are from a vec and are from same RQ.
 		 */
-		aura_handle = mbuf->pool->pool_id;
+		meta_aura = ws->meta_aura;
+		if (!meta_aura)
+			meta_aura = mbuf->pool->pool_id;
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 		laddr = lbase;
 		laddr += 8;
-		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
-		d_off += (mbuf_init & 0xFFFF);
+		sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+		d_off = sg_w1 - (uintptr_t)mbuf;
 		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	}
@@ -208,7 +216,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	/* Free remaining meta buffers if any */
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
-		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+		nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
 		plt_io_wmb();
 	}
 }
@@ -241,8 +249,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			uint64_t cq_w5;
 
 			m = (struct rte_mbuf *)mbuf;
-			d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-			d_off += RTE_PKTMBUF_HEADROOM;
+			d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
 
 			cq_w1 = *(uint64_t *)(u64[1] + 8);
 			cq_w5 = *(uint64_t *)(u64[1] + 40);
@@ -273,8 +280,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
-		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp[port], ws->lmt_base);
+		cn10k_process_vwqe(u64[1], port, flags, ws);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index fae4484758..d61e60dd2d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -148,6 +148,7 @@ struct cn10k_sso_hws {
 	uint8_t hws_id;
 	/* PTP timestamp */
 	struct cnxk_timesync_info **tstamp;
+	uint64_t meta_aura;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 7937cadd25..5f51c504b5 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -194,7 +194,7 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 
 	rq->vwqe_ena = 1;
 	rq->vwqe_first_skip = 0;
-	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+	rq->vwqe_aura_handle = vmp->pool_id;
 	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
 	rq->vwqe_wait_tmo =
 		tmo_ns /
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 80c5c0e962..e8faeebe1f 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -282,9 +282,13 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rxq->lmt_base = dev->nix.lmt_base;
 		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
 							   dev->inb.inl_dev);
+		rxq->meta_aura = rq->meta_aura_handle;
+		rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+		/* Assume meta packets come from the normal aura if the meta
+		 * aura is not set up. */
+		if (!rxq->meta_aura)
+			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
 	}
-	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
-	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
 
 	/* Lookup mem */
 	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index acfdbb66aa..d0a5b136e3 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -39,7 +39,7 @@ struct cn10k_eth_rxq {
 	uint16_t data_off;
 	uint64_t sa_base;
 	uint64_t lmt_base;
-	uint64_t aura_handle;
+	uint64_t meta_aura;
 	uint16_t rq;
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 0f8790b8c7..2cd297eb82 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -877,7 +877,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		aura_handle = rxq->aura_handle;
+		aura_handle = rxq->meta_aura;
 		sa_base = rxq->sa_base;
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -984,7 +984,7 @@ static __rte_always_inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	struct cn10k_eth_rxq *rxq = args;
 	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -1003,10 +1003,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint64_t aura_handle, lbase, laddr;
 	uint8_t loff = 0, lnum = 0, shft = 0;
 	uint8x16_t f0, f1, f2, f3;
 	uint16_t lmt_id, d_off;
+	uint64_t lbase, laddr;
 	uint16_t packets = 0;
 	uint16_t pkts_left;
 	uintptr_t sa_base;
@@ -1035,6 +1035,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
 		if (flags & NIX_RX_VWQE_F) {
+			uint64_t sg_w1;
 			uint16_t port;
 
 			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
@@ -1042,10 +1043,15 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Pick first mbuf's aura handle assuming all
 			 * mbufs are from a vec and are from same RQ.
 			 */
-			aura_handle = mbuf0->pool->pool_id;
+			if (!meta_aura)
+				meta_aura = mbuf0->pool->pool_id;
 			/* Calculate offset from mbuf to actual data area */
-			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
-			d_off += (mbuf_initializer & 0xFFFF);
+			/* Zero aura's first skip (i.e. mbuf setup) might not match
+			 * the actual offset, as first skip is taken from the second
+			 * pass RQ. So compute it using the difference b/w the first
+			 * SG pointer and the mbuf address. */
+			sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+			d_off = (sg_w1 - (uint64_t)mbuf0);
 
 			/* Get SA Base from lookup tbl using port_id */
 			port = mbuf_initializer >> 48;
@@ -1053,7 +1059,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 			lbase = lmt_base;
 		} else {
-			aura_handle = rxq->aura_handle;
+			meta_aura = rxq->meta_aura;
 			d_off = rxq->data_off;
 			sa_base = rxq->sa_base;
 			lbase = rxq->lmt_base;
@@ -1721,7 +1727,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(15 & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 				loff = loff - 15;
 				shft += 3;
 
@@ -1744,14 +1750,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(loff & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 
 				data = (data & ~(0x7UL << shft)) |
 				       (((uint64_t)loff >> 1) << shft);
 
 				/* Send up to 16 lmt lines of pointers */
 				nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
-							 aura_handle);
+							 meta_aura);
 				rte_io_wmb();
 				lnum = 0;
 				loff = 0;
@@ -1769,13 +1775,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		/* Update aura handle */
 		*(uint64_t *)(laddr - 8) =
 			(((uint64_t)(loff & 0x1) << 32) |
-			 roc_npa_aura_handle_to_aura(aura_handle));
+			 roc_npa_aura_handle_to_aura(meta_aura));
 
 		data = (data & ~(0x7UL << shft)) |
 		       (((uint64_t)loff >> 1) << shft);
 
 		/* Send up to 16 lmt lines of pointers */
-		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
+		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
 		if (flags & NIX_RX_VWQE_F)
 			plt_io_wmb();
 	}
@@ -1803,7 +1809,7 @@ static inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	RTE_SET_USED(args);
 	RTE_SET_USED(mbufs);
@@ -1812,6 +1818,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	RTE_SET_USED(lookup_mem);
 	RTE_SET_USED(tstamp);
 	RTE_SET_USED(lmt_base);
+	RTE_SET_USED(meta_aura);
 
 	return 0;
 }
@@ -2038,7 +2045,7 @@ NIX_RX_FASTPATH_MODES
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
-						  (flags), NULL, NULL, 0);     \
+						  (flags), NULL, NULL, 0, 0);  \
 	}
 
 #define NIX_RX_RECV_VEC_MSEG(fn, flags)                                        \
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 80ab3cfedd..85ad70e50b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1732,6 +1732,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	roc_nix_mac_link_info_get_cb_register(nix,
 					      cnxk_eth_dev_link_status_get_cb);
 
+	/* Register callback for inline meta pool create */
+	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index f11a9a0b63..a4178cfeff 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -642,6 +642,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
 struct cnxk_eth_sec_sess *
 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+			      bool destroy);
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 1de3454398..9304b1465d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -4,10 +4,14 @@
 
 #include <cnxk_ethdev.h>
 
+#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
+#define CNXK_NIX_INL_NB_META_BUFS     "nb_meta_bufs"
+#define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 
 struct inl_cpt_channel {
 	bool is_multi_channel;
@@ -28,6 +32,85 @@ bitmap_ctzll(uint64_t slab)
 	return __builtin_ctzll(slab);
 }
 
+int
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+{
+	const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+	struct rte_pktmbuf_pool_private mbp_priv;
+	struct npa_aura_s *aura;
+	struct rte_mempool *mp;
+	uint16_t first_skip;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		mp = rte_mempool_lookup(mp_name);
+		if (!mp)
+			return -ENOENT;
+
+		if (mp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		plt_free(mp->pool_config);
+		rte_mempool_free(mp);
+
+		*aura_handle = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	mp = rte_mempool_create_empty(mp_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!mp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	/* Indicate to allocate zero aura */
+	aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
+	if (!aura) {
+		rc = -ENOMEM;
+		goto free_mp;
+	}
+	aura->ena = 1;
+	aura->pool_addr = 0x0;
+
+	rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+					aura);
+	if (rc) {
+		plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
+		goto free_aura;
+	}
+
+	/* Init mempool private area */
+	first_skip = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+	memset(&mbp_priv, 0, sizeof(mbp_priv));
+	mbp_priv.mbuf_data_room_size = (buf_sz - first_skip +
+					RTE_PKTMBUF_HEADROOM);
+	rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(mp);
+	if (rc < 0) {
+		plt_err("Failed to create inline meta pool, rc=%d", rc);
+		goto free_aura;
+	}
+
+	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+	*aura_handle = mp->pool_id;
+	return 0;
+free_aura:
+	plt_free(aura);
+free_mp:
+	rte_mempool_free(mp);
+	return rc;
+}
+
 int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
@@ -128,7 +211,7 @@ struct rte_security_ops cnxk_eth_sec_ops = {
 };
 
 static int
-parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
+parse_val_u32(const char *key, const char *value, void *extra_args)
 {
 	RTE_SET_USED(key);
 	uint32_t val;
@@ -184,6 +267,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	uint32_t ipsec_in_min_spi = 0;
 	struct inl_cpt_channel cpt_channel;
 	struct rte_kvargs *kvlist;
+	uint32_t nb_meta_bufs = 0;
+	uint32_t meta_buf_sz = 0;
 	uint8_t selftest = 0;
 
 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -198,11 +283,15 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
 			   &selftest);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
+			   &parse_val_u32, &ipsec_in_min_spi);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
+			   &parse_val_u32, &ipsec_in_max_spi);
 	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
 			   &cpt_channel);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_NB_META_BUFS, &parse_val_u32,
+			   &nb_meta_bufs);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_META_BUF_SZ, &parse_val_u32,
+			   &meta_buf_sz);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -212,6 +301,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->channel = cpt_channel.channel;
 	inl_dev->chan_mask = cpt_channel.mask;
 	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
+	inl_dev->nb_meta_bufs = nb_meta_bufs;
+	inl_dev->meta_buf_sz = meta_buf_sz;
 	return 0;
 exit:
 	return -EINVAL;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
                     ` (17 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Replace the use of platform-specific APIs with platform-independent
APIs.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c    | 8 ++++----
 drivers/common/cnxk/roc_cpt.h    | 2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index d607bde3c4..6f0ee44b54 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -998,7 +998,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1019,9 +1019,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
 	inst.w4.s.dlen = ctx_len;
-	inst.dptr = rte_mempool_virt2iova(sa);
+	inst.dptr = sa;
 	inst.rptr = 0;
-	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+	inst.w7.s.cptr = sa;
 	inst.w7.s.egrp = egrp;
 
 	inst.w0.u64 = 0;
@@ -1029,7 +1029,7 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w3.u64 = 0;
 	inst.res_addr = (uintptr_t)hw_res;
 
-	rte_io_wmb();
+	plt_io_wmb();
 
 	do {
 		/* Copy CPT command to LMTLINE */
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 4e3a078a90..6953f2bdd3 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -173,7 +173,7 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa,
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
 				   uint8_t opcode, uint16_t ctx_len,
 				   uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 6d26b0cc01..78c181b4a4 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -82,8 +82,8 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->out_sa, opcode,
-				   ctx_len, egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
+				   opcode, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -174,8 +174,8 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->in_sa, opcode, ctx_len,
-				   egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
+				   opcode, ctx_len, egrp);
 	if (ret)
 		return ret;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
                     ` (16 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Pavan Nikhilesh,
	Shijith Thotton
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Use the Full context SA structures and command in IPsec fast path.
For inline outbound, populate CPT instruction as per Full context.
Add new macros and functions with respect to Full context.
Populate wqe ptr in CPT instruction with proper offset from mbuf.
Add option to override outbound inline SA IV for debug
purposes via environment variable. User can set env variable as:
export ETH_SEC_IV_OVR="0x0, 0x0,..."
Update mbuf len based on IP version in Rx post process.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c          |   8 +-
 drivers/common/cnxk/roc_cpt.c                |   9 +-
 drivers/common/cnxk/roc_cpt.h                |   8 +-
 drivers/common/cnxk/roc_ie_on.h              |   6 +
 drivers/common/cnxk/roc_nix_inl.c            |  33 ++++--
 drivers/common/cnxk/roc_nix_inl.h            |  46 ++++++++
 drivers/common/cnxk/roc_nix_inl_dev.c        |   2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c             |   8 +-
 drivers/event/cnxk/cn9k_worker.h             |  48 +++++---
 drivers/net/cnxk/cn9k_ethdev.h               |   3 +
 drivers/net/cnxk/cn9k_ethdev_sec.c           | 111 +++++++++++++++----
 drivers/net/cnxk/cn9k_rx.h                   |  43 ++++---
 drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c |  32 ++----
 13 files changed, 255 insertions(+), 102 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index dca8742be3..89ac900d90 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1242,7 +1242,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 			ctx_len += sizeof(template->ip4);
 
 			ip4->version_ihl = RTE_IPV4_VHL_DEF;
-			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
+			ip4->time_to_live = ipsec->tunnel.ipv4.ttl ?
+						    ipsec->tunnel.ipv4.ttl :
+						    0x40;
 			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
 			if (ipsec->tunnel.ipv4.df)
 				frag_off |= RTE_IPV4_HDR_DF_FLAG;
@@ -1275,7 +1277,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 						 ((ipsec->tunnel.ipv6.flabel
 						   << RTE_IPV6_HDR_FL_SHIFT) &
 						  RTE_IPV6_HDR_FL_MASK));
-			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
+			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit ?
+						  ipsec->tunnel.ipv6.hlimit :
+						  0x40;
 			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
 			       sizeof(struct in6_addr));
 			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 6f0ee44b54..8fc072b9d0 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -277,7 +277,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 
 int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2)
+			     uint16_t param2, uint16_t opcode)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -292,6 +292,7 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	req->sso_pf_func = idev_sso_pffunc_get();
 	req->param1 = param1;
 	req->param2 = param2;
+	req->opcode = opcode;
 
 	return mbox_process(mbox);
 }
@@ -998,7 +999,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1014,7 +1015,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 
 	hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
 
-	inst.w4.s.opcode_major = opcode;
+	inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
+	if (inb)
+		inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	inst.w4.s.opcode_minor = ctx_len >> 3;
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 6953f2bdd3..9a79998705 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -161,7 +161,8 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
 	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2);
+					   uint16_t param1, uint16_t param2,
+					   uint16_t opcode);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
@@ -173,7 +174,6 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
-				   uint8_t opcode, uint16_t ctx_len,
-				   uint8_t egrp);
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
+				   uint16_t ctx_len, uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 2d93cb609c..961d5fc95e 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -13,6 +13,12 @@
 #define ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC 0x23
 #define ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC  0x24
 
+#define ROC_IE_ON_INB_MAX_CTX_LEN	       34UL
+#define ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT  (1 << 12)
+#define ROC_IE_ON_OUTB_MAX_CTX_LEN	       31UL
+#define ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT (1 << 9)
+#define ROC_IE_ON_OUTB_PER_PKT_IV	       (1 << 11)
+
 /* Ucode completion codes */
 enum roc_ie_on_ucc_ipsec {
 	ROC_IE_ON_UCC_SUCCESS = 0,
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 507a15315a..be0b8066c7 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -8,11 +8,11 @@
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ == 1024);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
@@ -184,7 +184,7 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
@@ -422,7 +422,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct roc_cpt *roc_cpt;
+	uint16_t opcode;
 	uint16_t param1;
+	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -439,17 +441,23 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
+		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		opcode =
+			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
+			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
 		union roc_ot_ipsec_inb_param1 u;
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
 		param1 = u.u16;
+		param2 = 0;
+		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
@@ -605,7 +613,7 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
+		sa_sz = ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ;
 	else
 		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
 	/* Alloc contiguous memory of outbound SA */
@@ -1212,7 +1220,12 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
 
 	/* Nothing much to do on cn9k */
 	if (roc_model_is_cn9k()) {
-		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+		nix = roc_nix_to_nix_priv(roc_nix);
+		outb_lf = nix->cpt_lf_base;
+		rc = roc_on_cpt_ctx_write(outb_lf, (uint64_t)sa_dptr, inb,
+					  sa_len, ROC_CPT_DFLT_ENG_GRP_SE_IE);
+		if (rc)
+			return rc;
 		return 0;
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 9911a48b2d..555cb28c1a 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -22,6 +22,24 @@
 	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
 #define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
 
+/* ON INB HW area */
+#define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
+	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
+/* ON INB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD 640
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ                                         \
+	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
+
+/* ON OUTB HW area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
+	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
+/* ON OUTB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
+	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2 9
+
 /* OT INB HW area */
 #define ROC_NIX_INL_OT_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ot_ipsec_inb_sa), ROC_ALIGN)
@@ -61,6 +79,34 @@
 #define ROC_NIX_INL_REAS_ZOMBIE_LIMIT	  0xFFF
 #define ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD 10
 
+static inline struct roc_ie_on_inb_sa *
+roc_nix_inl_on_ipsec_inb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline struct roc_ie_on_outb_sa *
+roc_nix_inl_on_ipsec_outb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_INB_HW_SZ);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
+}
+
 static inline struct roc_onf_ipsec_inb_sa *
 roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 1e9b2b95d7..4fe7b5180b 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -394,7 +394,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 
 	/* CN9K SA is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 78c181b4a4..84915581fc 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -29,7 +29,6 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret;
 
@@ -80,10 +79,9 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 		return ret;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
-				   opcode, ctx_len, egrp);
+				   false, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -133,7 +131,6 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len = 0;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret = 0;
 
@@ -172,10 +169,9 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 		sa->esn_en = 1;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
-				   opcode, ctx_len, egrp);
+				   true, ctx_len, egrp);
 	if (ret)
 		return ret;
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index d86cb94a77..384b428ed1 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -617,12 +617,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t sa_base = txq->sa_base;
 	uint32_t pkt_len, dlen_adj, rlen;
+	struct roc_ie_on_outb_hdr *hdr;
 	uint64x2_t cmd01, cmd23;
 	uint64_t lmt_status, sa;
 	union nix_send_sg_s *sg;
+	uint32_t esn_lo, esn_hi;
 	uintptr_t dptr, nixtx;
 	uint64_t ucode_cmd[4];
-	uint64_t esn, *iv;
+	uint64_t esn;
 	uint8_t l2_len;
 
 	mdata.u64 = *rte_security_dynfield(m);
@@ -661,14 +663,19 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 
 	/* Load opcode and cptr already prepared at pkt metadata set */
 	pkt_len -= l2_len;
-	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
-		    ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
+	pkt_len += (sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN) +
+		   ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 
-	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
+	sa = (uintptr_t)roc_nix_inl_on_ipsec_outb_sa(sa_base, mdata.sa_idx);
 	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
-	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
-			0x40UL << 48 | pkt_len);
+	ucode_cmd[0] = (((ROC_IE_ON_OUTB_MAX_CTX_LEN << 8) |
+			 ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC)
+				<< 48 |
+			(ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT |
+			 (ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ >>
+			  3)) << 32 |
+			pkt_len);
 
 	/* CPT Word 0 and Word 1 */
 	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
@@ -678,35 +685,40 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	/* CPT word 2 and 3 */
 	cmd23 = vdupq_n_u64(0);
 	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
-				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
-	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20),
+			       cmd23, 0);
+	cmd23 = vsetq_lane_u64(((uintptr_t)m + sizeof(struct rte_mbuf)) | 1,
+			       cmd23, 1);
 
 	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
-		sizeof(struct roc_onf_ipsec_outb_hdr);
+		(sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN);
 	ucode_cmd[1] = dptr;
 	ucode_cmd[2] = dptr;
 
-	/* Update IV to zero and l2 sz */
-	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
+	/* Update l2 sz */
+	*(uint16_t *)(dptr + (sizeof(struct roc_ie_on_outb_hdr) -
+			      ROC_IE_ON_MAX_IV_LEN)) =
 		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
-	iv = (uint64_t *)(dptr + 8);
-	iv[0] = 0;
-	iv[1] = 0;
 
 	/* Head wait if needed */
 	if (base)
 		roc_sso_hws_head_wait(base);
 
 	/* ESN */
-	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
+	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd((void *)sa);
 	esn = outb_priv->esn;
 	outb_priv->esn = esn + 1;
 
 	ucode_cmd[0] |= (esn >> 32) << 16;
-	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_lo = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_hi = rte_cpu_to_be_32(esn >> 32);
 
-	/* Update ESN and IPID and IV */
-	*(uint64_t *)dptr = esn << 32 | esn;
+	/* Update ESN, IPID and IV */
+	hdr = (struct roc_ie_on_outb_hdr *)dptr;
+	hdr->ip_id = esn_lo;
+	hdr->seq = esn_lo;
+	hdr->esn = esn_hi;
+	hdr->df_tos = 0;
 
 	rte_io_wmb();
 	cn9k_sso_txq_fc_wait(txq);
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 449729f0c5..472a4b06da 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -79,6 +79,9 @@ struct cn9k_outb_priv_data {
 
 	/* Back pointer to eth sec session */
 	struct cnxk_eth_sec_sess *eth_sec;
+
+	/* IV in DBG mode */
+	uint8_t iv_dbg[ROC_IE_ON_MAX_IV_LEN];
 };
 
 struct cn9k_sec_sess_priv {
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 4dd0b6185e..88b95fb26c 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -134,6 +134,37 @@ ar_window_init(struct cn9k_inb_priv_data *inb_priv)
 	return 0;
 }
 
+static void
+outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
+{
+	uint8_t *iv_dbg = common_sa->iv.aes_iv;
+	char *iv_str = strdup(__iv_str);
+	char *iv_b = NULL;
+	char *save;
+	int i, iv_len = ROC_IE_ON_MAX_IV_LEN;
+
+	if (!iv_str)
+		return;
+
+	if (common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
+	    common_sa->ctl.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
+		iv_dbg = common_sa->iv.gcm.iv;
+		iv_len = 8;
+	}
+
+	memset(iv_dbg, 0, iv_len);
+	for (i = 0; i < iv_len; i++) {
+		iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+		if (!iv_b)
+			break;
+		iv_dbg[i] = strtoul(iv_b, NULL, 0);
+	}
+
+	free(iv_str);
+}
+
 static int
 cn9k_eth_sec_session_create(void *device,
 			    struct rte_security_session_conf *conf,
@@ -150,6 +181,7 @@ cn9k_eth_sec_session_create(void *device,
 	rte_spinlock_t *lock;
 	char tbuf[128] = {0};
 	bool inbound;
+	int ctx_len;
 	int rc = 0;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
@@ -183,21 +215,26 @@ cn9k_eth_sec_session_create(void *device,
 	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
 	sess_priv.u64 = 0;
 
+	if (!dev->outb.lf_base) {
+		plt_err("Could not allocate security session private data");
+		return -ENOMEM;
+	}
+
 	if (inbound) {
 		struct cn9k_inb_priv_data *inb_priv;
-		struct roc_onf_ipsec_inb_sa *inb_sa;
+		struct roc_ie_on_inb_sa *inb_sa;
 		uint32_t spi_mask;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD);
 
 		spi_mask = roc_nix_inl_inb_spi_range(nix, false, NULL, NULL);
 
 		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
 		 * device always for CN9K.
 		 */
-		inb_sa = (struct roc_onf_ipsec_inb_sa *)
-			 roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
+		inb_sa = (struct roc_ie_on_inb_sa *)roc_nix_inl_inb_sa_get(
+			nix, false, ipsec->spi);
 		if (!inb_sa) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to create ingress sa");
@@ -206,7 +243,7 @@ cn9k_eth_sec_session_create(void *device,
 		}
 
 		/* Check if SA is already in use */
-		if (inb_sa->ctl.valid) {
+		if (inb_sa->common_sa.ctl.valid) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Inbound SA with SPI %u already in use",
 				 ipsec->spi);
@@ -214,17 +251,26 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
+		memset(inb_sa, 0, sizeof(struct roc_ie_on_inb_sa));
 
 		/* Fill inbound sa params */
-		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
-		if (rc) {
+		rc = cnxk_on_ipsec_inb_sa_create(ipsec, crypto, inb_sa);
+		if (rc < 0) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init inbound sa, rc=%d", rc);
 			goto mempool_put;
 		}
 
-		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa, inb_sa, inbound,
+					   ctx_len);
+		if (rc) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to create inbound sa, rc=%d", rc);
+			goto mempool_put;
+		}
+
+		inb_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
 
@@ -253,27 +299,38 @@ cn9k_eth_sec_session_create(void *device,
 		dev->inb.nb_sess++;
 	} else {
 		struct cn9k_outb_priv_data *outb_priv;
-		struct roc_onf_ipsec_outb_sa *outb_sa;
 		uintptr_t sa_base = dev->outb.sa_base;
 		struct cnxk_ipsec_outb_rlens *rlens;
+		struct roc_ie_on_outb_sa *outb_sa;
+		const char *iv_str;
 		uint32_t sa_idx;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD);
 
 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
 		if (rc)
 			goto mempool_put;
 
-		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
-		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
+		outb_sa = roc_nix_inl_on_ipsec_outb_sa(sa_base, sa_idx);
+		outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
 		rlens = &outb_priv->rlens;
 
-		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
+		memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));
 
 		/* Fill outbound sa params */
-		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
+		rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
+		if (rc < 0) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to init outbound sa, rc=%d", rc);
+			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+			goto mempool_put;
+		}
+
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, inbound,
+					   ctx_len);
 		if (rc) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init outbound sa, rc=%d", rc);
@@ -281,6 +338,18 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
+		/* Always enable explicit IV.
+		 * Copy the IV from application only when iv_gen_disable flag is
+		 * set
+		 */
+		outb_sa->common_sa.ctl.explicit_iv_en = 1;
+
+		if (conf->ipsec.options.iv_gen_disable == 1) {
+			iv_str = getenv("ETH_SEC_IV_OVR");
+			if (iv_str)
+				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
+		}
+
 		/* Save userdata */
 		outb_priv->userdata = conf->userdata;
 		outb_priv->sa_idx = sa_idx;
@@ -288,8 +357,8 @@ cn9k_eth_sec_session_create(void *device,
 		/* Start sequence number with 1 */
 		outb_priv->seq = 1;
 
-		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
-		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+		memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
+		if (outb_sa->common_sa.ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
 			outb_priv->copy_salt = 1;
 
 		/* Save rlen info */
@@ -337,9 +406,9 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	struct roc_onf_ipsec_outb_sa *outb_sa;
-	struct roc_onf_ipsec_inb_sa *inb_sa;
 	struct cnxk_eth_sec_sess *eth_sec;
+	struct roc_ie_on_outb_sa *outb_sa;
+	struct roc_ie_on_inb_sa *inb_sa;
 	struct rte_mempool *mp;
 	rte_spinlock_t *lock;
 
@@ -353,14 +422,14 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 	if (eth_sec->inb) {
 		inb_sa = eth_sec->sa;
 		/* Disable SA */
-		inb_sa->ctl.valid = 0;
+		inb_sa->common_sa.ctl.valid = 0;
 
 		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
 		dev->inb.nb_sess--;
 	} else {
 		outb_sa = eth_sec->sa;
 		/* Disable SA */
-		outb_sa->ctl.valid = 0;
+		outb_sa->common_sa.ctl.valid = 0;
 
 		/* Release Outbound SA index */
 		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 25a4927a33..1a9f920b41 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -171,7 +171,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 }
 
 static inline int
-ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
+ipsec_antireplay_check(struct roc_ie_on_inb_sa *sa,
 		       struct cn9k_inb_priv_data *priv, uintptr_t data,
 		       uint32_t win_sz)
 {
@@ -183,7 +183,7 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	uint8_t esn;
 	int rc;
 
-	esn = sa->ctl.esn_en;
+	esn = sa->common_sa.ctl.esn_en;
 	seql = rte_be_to_cpu_32(*((uint32_t *)(data + IPSEC_SQ_LO_IDX)));
 
 	if (!esn) {
@@ -200,11 +200,12 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	rte_spinlock_lock(&ar->lock);
 	rc = cnxk_on_anti_replay_check(seq, ar, win_sz);
 	if (esn && !rc) {
-		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->esn_hi) << 32) |
-			    rte_be_to_cpu_32(sa->esn_low);
+		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->common_sa.seq_t.th)
+			     << 32) |
+			    rte_be_to_cpu_32(sa->common_sa.seq_t.tl);
 		if (seq > seq_in_sa) {
-			sa->esn_low = rte_cpu_to_be_32(seql);
-			sa->esn_hi = rte_cpu_to_be_32(seqh);
+			sa->common_sa.seq_t.tl = rte_cpu_to_be_32(seql);
+			sa->common_sa.seq_t.th = rte_cpu_to_be_32(seqh);
 		}
 	}
 	rte_spinlock_unlock(&ar->lock);
@@ -266,9 +267,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
 	struct cn9k_inb_priv_data *sa_priv;
-	struct roc_onf_ipsec_inb_sa *sa;
+	struct roc_ie_on_inb_sa *sa;
 	uint8_t lcptr = rx->lcptr;
-	struct rte_ipv4_hdr *ipv4;
+	struct rte_ipv4_hdr *ip;
+	struct rte_ipv6_hdr *ip6;
 	uint16_t data_off, res;
 	uint32_t spi, win_sz;
 	uint32_t spi_mask;
@@ -279,6 +281,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	res = *(uint64_t *)(res_sg0 + 8);
 	data_off = *rearm_val & (BIT_ULL(16) - 1);
 	data = (uintptr_t)m->buf_addr;
+
 	data += data_off;
 
 	rte_prefetch0((void *)data);
@@ -294,10 +297,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	sa_w = sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	spi_mask = (1ULL << sa_w) - 1;
-	sa = roc_nix_inl_onf_ipsec_inb_sa(sa_base, spi & spi_mask);
+	sa = roc_nix_inl_on_ipsec_inb_sa(sa_base, spi & spi_mask);
 
 	/* Update dynamic field with userdata */
-	sa_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(sa);
+	sa_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(sa);
 	dw = *(__uint128_t *)sa_priv;
 	*rte_security_dynfield(m) = (uint64_t)dw;
 
@@ -309,16 +312,26 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	}
 
 	/* Get total length from IPv4 header. We can assume only IPv4 */
-	ipv4 = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-				       ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	ip = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
+				     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+
+	if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
+	    IPVERSION) {
+		*len = rte_be_to_cpu_16(ip->total_length) + lcptr;
+	} else {
+		PLT_ASSERT(((ip->version_ihl & 0xf0) >>
+			    RTE_IPV4_IHL_MULTIPLIER) == 6);
+		ip6 = (struct rte_ipv6_hdr *)ip;
+		*len = rte_be_to_cpu_16(ip6->payload_len) +
+		       sizeof(struct rte_ipv6_hdr) + lcptr;
+	}
 
 	/* Update data offset */
-	data_off += (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-		     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	data_off +=
+		(ROC_ONF_IPSEC_INB_SPI_SEQ_SZ + ROC_ONF_IPSEC_INB_MAX_L2_SZ);
 	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
 	*rearm_val |= data_off;
 
-	*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
 	return RTE_MBUF_F_RX_SEC_OFFLOAD;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
index bfdbd1ee5d..dd8b7a525c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
@@ -14,59 +14,47 @@
 static int
 copy_outb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_outb_sa *out_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_outb_sa *out_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	out_sa = (struct roc_onf_ipsec_outb_sa *)sa;
-	w0.ctl = out_sa->ctl;
+	out_sa = (struct roc_ie_on_outb_sa *)sa;
+	w0.ctl = out_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "outsa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
-	snprintf(str, sizeof(str), "outsa_src_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_src);
-
-	snprintf(str, sizeof(str), "outsa_dst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_dst);
-
-	snprintf(str, sizeof(str), "outsa_isrc_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_src);
-
-	snprintf(str, sizeof(str), "outsa_idst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_dst);
-
 	return 0;
 }
 
 static int
 copy_inb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_inb_sa *in_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_inb_sa *in_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	in_sa = (struct roc_onf_ipsec_inb_sa *)sa;
-	w0.ctl = in_sa->ctl;
+	in_sa = (struct roc_ie_on_inb_sa *)sa;
+	w0.ctl = in_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "insa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
 	snprintf(str, sizeof(str), "insa_esnh_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_hi);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.th);
 
 	snprintf(str, sizeof(str), "insa_esnl_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_low);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.tl);
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (12 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
                     ` (15 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add capabilities for HMAC-SHA2 and UDP encapsulation for cn9k
inline-mode security offload.
Set explicit IV mode in the IPsec context when the IV is provided
by the application.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 79 +++++++++++++++++++++++++++---
 1 file changed, 71 insertions(+), 8 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 88b95fb26c..42ba04a4ad 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -80,6 +80,66 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 24,
+					.max = 48,
+					.increment = 24
+					},
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 64,
+					.increment = 32
+				},
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -91,7 +151,9 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -103,7 +165,10 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1,
+					.iv_gen_disable = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -338,13 +403,11 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		/* Always enable explicit IV.
-		 * Copy the IV from application only when iv_gen_disable flag is
-		 * set
+		/* When IV is provided by the application,
+		 * copy the IV to context and enable explicit IV flag in context.
 		 */
-		outb_sa->common_sa.ctl.explicit_iv_en = 1;
-
-		if (conf->ipsec.options.iv_gen_disable == 1) {
+		if (ipsec->options.iv_gen_disable == 1) {
+			outb_sa->common_sa.ctl.explicit_iv_en = 1;
 			iv_str = getenv("ETH_SEC_IV_OVR");
 			if (iv_str)
 				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (13 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
                     ` (14 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

This patch enables aging on the CNF105 variant of the CN10K platform.
It also reports aged-drop statistics when dumping/resetting SQ statistics.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_errata.h    |  3 +--
 drivers/common/cnxk/roc_nix_debug.c | 19 +++++++++----------
 drivers/common/cnxk/roc_nix_stats.c |  2 ++
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 8dc372f956..d3b32f1786 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -30,8 +30,7 @@ roc_errata_npa_has_no_fc_stype_ststp(void)
 static inline bool
 roc_errata_nix_has_no_drop_aging(void)
 {
-	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0());
 }
 
 /* Errata IPBUNIXRX-40130 */
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index efac7e5b14..bd7a5d3dc2 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -472,22 +472,21 @@ nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
 	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
 	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
-		 ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
+	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
+		 ctx->vfi_lso_vlan1_ins_ena);
 	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
-		 ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
+	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
+		 ctx->vfi_lso_sizem1);
 	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->scm_lso_rem);
+	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
 	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
 	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_pkts);
+	nix_dump("W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
+	nix_dump("W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
+	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 8fd5c711c3..2e5071e1bb 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -238,6 +238,8 @@ nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
 		aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
 		aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
 		aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
+		aq->sq_mask.aged_drop_octs = ~(aq->sq_mask.aged_drop_octs);
+		aq->sq_mask.aged_drop_pkts = ~(aq->sq_mask.aged_drop_pkts);
 	}
 
 	rc = mbox_process(mbox);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (14 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 18/31] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
                     ` (13 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Update the shaper profile with a user-configurable RED algorithm.
This allows configuring a TM node in RED drop mode versus
stall mode.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h          | 1 +
 drivers/common/cnxk/roc_nix_tm_utils.c | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 77e4d2919b..b17623076c 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -572,6 +572,7 @@ struct roc_nix_tm_shaper_profile {
 	int32_t pkt_len_adj;
 	bool pkt_mode;
 	int8_t accuracy;
+	uint8_t red_algo;
 	/* Function to free this memory */
 	void (*free_fn)(void *profile);
 };
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index b9b605f8b1..193f9df5ff 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1236,11 +1236,14 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	struct nix_tm_shaper_profile *profile;
 	struct nix_tm_shaper_data cir, pir;
 
+	if (!roc_prof)
+		return;
+
 	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
-	tm_node->red_algo = NIX_REDALG_STD;
+	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (profile && roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 18/31] common/cnxk: add 98xx A1 platform
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (15 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
                     ` (12 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Adding support for 98xx A1 pass chip.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c |  1 +
 drivers/common/cnxk/roc_model.h | 16 +++++++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index bdbd9a96b2..04338311ec 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -65,6 +65,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
+	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 1, ROC_MODEL_CN96xx_B0, "cn96xx_b0"},
 	{VENDOR_CAVIUM, PART_96xx, 2, 0, ROC_MODEL_CN96xx_C0, "cn96xx_c0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index d231d44b60..57a8af06fc 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -21,6 +21,7 @@ struct roc_model {
 #define ROC_MODEL_CNF95xxN_A1  BIT_ULL(14)
 #define ROC_MODEL_CNF95xxN_B0  BIT_ULL(15)
 #define ROC_MODEL_CN98xx_A0    BIT_ULL(16)
+#define ROC_MODEL_CN98xx_A1    BIT_ULL(17)
 #define ROC_MODEL_CN106xx_A0   BIT_ULL(20)
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
@@ -38,10 +39,11 @@ struct roc_model {
 } __plt_cache_aligned;
 
 #define ROC_MODEL_CN96xx_Ax (ROC_MODEL_CN96xx_A0 | ROC_MODEL_CN96xx_B0)
+#define ROC_MODEL_CN98xx_Ax (ROC_MODEL_CN98xx_A0 | ROC_MODEL_CN98xx_A1)
 #define ROC_MODEL_CN9K                                                         \
 	(ROC_MODEL_CN96xx_Ax | ROC_MODEL_CN96xx_C0 | ROC_MODEL_CNF95xx_A0 |    \
 	 ROC_MODEL_CNF95xx_B0 | ROC_MODEL_CNF95xxMM_A0 |                       \
-	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_A0 | \
+	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_Ax | \
 	 ROC_MODEL_CNF95xxN_A1 | ROC_MODEL_CNF95xxN_B0)
 #define ROC_MODEL_CNF9K                                                        \
 	(ROC_MODEL_CNF95xx_A0 | ROC_MODEL_CNF95xx_B0 |                         \
@@ -110,10 +112,22 @@ roc_model_is_cn10k(void)
 
 static inline uint64_t
 roc_model_is_cn98xx(void)
+{
+	return (roc_model->flag & ROC_MODEL_CN98xx_Ax);
+}
+
+static inline uint64_t
+roc_model_is_cn98xx_a0(void)
 {
 	return (roc_model->flag & ROC_MODEL_CN98xx_A0);
 }
 
+static inline uint64_t
+roc_model_is_cn98xx_a1(void)
+{
+	return (roc_model->flag & ROC_MODEL_CN98xx_A1);
+}
+
 static inline uint64_t
 roc_model_is_cn96_a0(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (16 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 18/31] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
                     ` (11 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable the below cipher and auth algorithms as part of the
capabilities for inline IPsec:
AES_CTR
AES_XCBC_MAC
AES_GMAC

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 86 ++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 42ba04a4ad..2dc9fe1580 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -10,6 +10,27 @@
 #include <cnxk_security.h>
 
 static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -60,6 +81,71 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES-XCBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0,
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (17 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
                     ` (10 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable 3DES-CBC cipher capability for inline IPsec
processing.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  3 +++
 drivers/crypto/cnxk/cn9k_ipsec.c    |  6 ++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 21 ++++++++++++++++++++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 89ac900d90..a44254931e 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1033,6 +1033,9 @@ on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
 			case RTE_CRYPTO_CIPHER_NULL:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
 				break;
+			case RTE_CRYPTO_CIPHER_3DES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_3DES_CBC;
+				break;
 			case RTE_CRYPTO_CIPHER_AES_CBC:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
 				aes_key_len = cipher_xform->cipher.key.length;
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 84915581fc..3d37449907 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -248,6 +248,12 @@ cn9k_ipsec_xform_verify(struct rte_security_ipsec_xform *ipsec,
 				plt_err("Transport mode AES-CBC AES-XCBC is not supported");
 				return -ENOTSUP;
 			}
+
+			if ((cipher->algo == RTE_CRYPTO_CIPHER_3DES_CBC) &&
+			    (auth->algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC)) {
+				plt_err("Transport mode 3DES-CBC AES-XCBC is not supported");
+				return -ENOTSUP;
+			}
 		}
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 2dc9fe1580..9536a99c8e 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -30,7 +30,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, },
 		}, }
 	},
-
+	{	/* 3DES CBC  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 8
+				}
+			}, },
+		}, }
+	},
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (18 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 22/31] common/cnxk: add support for CPT second pass Nithin Dabilpuram
                     ` (9 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

CNXK platforms do not support PFC on LBK, so skip the
configuration on LBK interfaces.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     | 2 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 85ad70e50b..0603d73a90 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1860,7 +1860,7 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 		pfc_conf.tx_pause.rx_qid = i;
 		rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
 							      &pfc_conf);
-		if (rc)
+		if (rc && rc != -ENOTSUP)
 			plt_err("Failed to reset PFC. error code(%d)", rc);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 1592971073..64beabdd12 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -356,8 +356,8 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
 		return -ENOTSUP;
 	}
 
-	if (roc_nix_is_sdp(nix)) {
-		plt_err("Prio flow ctrl config is not allowed on SDP");
+	if (roc_nix_is_sdp(nix) || roc_nix_is_lbk(nix)) {
+		plt_nix_dbg("Prio flow ctrl config is not allowed on SDP/LBK");
 		return -ENOTSUP;
 	}
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 22/31] common/cnxk: add support for CPT second pass
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (19 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
                     ` (8 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add a mailbox for masking and setting nix_rq_ctx
parameters and for enabling RQ masking in ipsec_cfg1,
so that the second pass is applied to all RQs.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/hw/nix.h      |  4 +-
 drivers/common/cnxk/roc_mbox.h    | 23 ++++++++-
 drivers/common/cnxk/roc_nix_inl.c | 81 +++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 5863e358e0..a5352644ca 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -1242,7 +1242,9 @@ struct nix_cn10k_rq_ctx_s {
 	uint64_t ipsech_ena : 1;
 	uint64_t ena_wqwd : 1;
 	uint64_t cq : 20;
-	uint64_t rsvd_36_24 : 13;
+	uint64_t rsvd_34_24 : 11;
+	uint64_t port_ol4_dis : 1;
+	uint64_t port_il4_dis : 1;
 	uint64_t lenerr_dis : 1;
 	uint64_t csum_il4_dis : 1;
 	uint64_t csum_ol4_dis : 1;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 912de1121b..688c70b4ee 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -265,7 +265,9 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
 	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
-	  msg_req, nix_inline_ipsec_cfg)
+	  msg_req, nix_inline_ipsec_cfg)				       \
+	M(NIX_LF_INLINE_RQ_CFG, 0x8024, nix_lf_inline_rq_cfg,                  \
+	  nix_rq_cpt_field_mask_cfg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1088,6 +1090,25 @@ struct nix_mark_format_cfg_rsp {
 	uint8_t __io mark_format_idx;
 };
 
+struct nix_rq_cpt_field_mask_cfg_req {
+	struct mbox_msghdr hdr;
+#define RQ_CTX_MASK_MAX 6
+	union {
+		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_set;
+	};
+	union {
+		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_mask;
+	};
+	struct nix_lf_rx_ipec_cfg1_req {
+		uint32_t __io spb_cpt_aura;
+		uint8_t __io rq_mask_enable;
+		uint8_t __io spb_cpt_sizem1;
+		uint8_t __io spb_cpt_enable;
+	} ipsec_cfg1;
+};
+
 struct nix_lso_format_cfg {
 	struct mbox_msghdr hdr;
 	uint64_t __io field_mask;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index be0b8066c7..cdf31b1f0c 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -416,6 +416,70 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
 }
 
+static int
+nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint64_t aura_handle;
+	int rc = -ENOSPC;
+	int i;
+
+	if (!idev)
+		return rc;
+
+	inl_cfg = &idev->inl_cfg;
+	msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+	if (msk_req == NULL)
+		return rc;
+
+	for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+		msk_req->rq_ctx_word_mask[i] = 0xFFFFFFFFFFFFFFFF;
+
+	msk_req->rq_set.len_ol3_dis = 1;
+	msk_req->rq_set.len_ol4_dis = 1;
+	msk_req->rq_set.len_il3_dis = 1;
+
+	msk_req->rq_set.len_il4_dis = 1;
+	msk_req->rq_set.csum_ol4_dis = 1;
+	msk_req->rq_set.csum_il4_dis = 1;
+
+	msk_req->rq_set.lenerr_dis = 1;
+	msk_req->rq_set.port_ol4_dis = 1;
+	msk_req->rq_set.port_il4_dis = 1;
+
+	msk_req->rq_set.lpb_drop_ena = 0;
+	msk_req->rq_set.spb_drop_ena = 0;
+	msk_req->rq_set.xqe_drop_ena = 0;
+
+	msk_req->rq_mask.len_ol3_dis = ~(msk_req->rq_set.len_ol3_dis);
+	msk_req->rq_mask.len_ol4_dis = ~(msk_req->rq_set.len_ol4_dis);
+	msk_req->rq_mask.len_il3_dis = ~(msk_req->rq_set.len_il3_dis);
+
+	msk_req->rq_mask.len_il4_dis = ~(msk_req->rq_set.len_il4_dis);
+	msk_req->rq_mask.csum_ol4_dis = ~(msk_req->rq_set.csum_ol4_dis);
+	msk_req->rq_mask.csum_il4_dis = ~(msk_req->rq_set.csum_il4_dis);
+
+	msk_req->rq_mask.lenerr_dis = ~(msk_req->rq_set.lenerr_dis);
+	msk_req->rq_mask.port_ol4_dis = ~(msk_req->rq_set.port_ol4_dis);
+	msk_req->rq_mask.port_il4_dis = ~(msk_req->rq_set.port_il4_dis);
+
+	msk_req->rq_mask.lpb_drop_ena = ~(msk_req->rq_set.lpb_drop_ena);
+	msk_req->rq_mask.spb_drop_ena = ~(msk_req->rq_set.spb_drop_ena);
+	msk_req->rq_mask.xqe_drop_ena = ~(msk_req->rq_set.xqe_drop_ena);
+
+	aura_handle = roc_npa_zero_aura_handle();
+	msk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);
+	msk_req->ipsec_cfg1.rq_mask_enable = enable;
+	msk_req->ipsec_cfg1.spb_cpt_sizem1 = inl_cfg->buf_sz;
+	msk_req->ipsec_cfg1.spb_cpt_enable = enable;
+
+	return mbox_process(mbox);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -472,6 +536,14 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		nix->need_meta_aura = true;
 		idev->inl_cfg.refs++;
 	}
+
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, true);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -481,6 +553,7 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	int rc;
 
 	if (!nix->inl_inb_ena)
 		return 0;
@@ -496,6 +569,14 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 			nix_inl_meta_aura_destroy();
 	}
 
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, false);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (20 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 22/31] common/cnxk: add support for CPT second pass Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
                     ` (7 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

Update the CQ threshold limit associated with the SQ. This is
used when completions are needed for packets that are
successfully transmitted.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       | 1 +
 drivers/common/cnxk/roc_nix_queue.c | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index b17623076c..8869cf5169 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -346,6 +346,7 @@ struct roc_nix_sq {
 	uint32_t nb_desc;
 	uint16_t qid;
 	uint16_t cqid;
+	uint16_t cq_drop_thresh;
 	bool sso_ena;
 	bool cq_ena;
 	/* End of Input parameters */
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index b197de0a77..60303329cc 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -907,6 +907,7 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -1024,6 +1025,7 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (21 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
                     ` (6 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

This patch adds RoC support for Tx completion events via
RQ to CQ mapping.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.c       | 5 ++++-
 drivers/common/cnxk/roc_nix.h       | 2 ++
 drivers/common/cnxk/roc_nix_queue.c | 7 ++-----
 drivers/net/cnxk/cnxk_ethdev.c      | 3 +++
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 151d8c3426..4bb306b60e 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -154,7 +154,10 @@ roc_nix_lf_alloc(struct roc_nix *roc_nix, uint32_t nb_rxq, uint32_t nb_txq,
 		return rc;
 	req->rq_cnt = nb_rxq;
 	req->sq_cnt = nb_txq;
-	req->cq_cnt = nb_rxq;
+	if (roc_nix->tx_compl_ena)
+		req->cq_cnt = nb_rxq + nb_txq;
+	else
+		req->cq_cnt = nb_rxq;
 	/* XQESZ can be W64 or W16 */
 	req->xqe_sz = NIX_XQESZ_W16;
 	req->rss_sz = nix->reta_sz;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 8869cf5169..8cea3232d0 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -287,6 +287,7 @@ struct roc_nix_stats_queue {
 struct roc_nix_rq {
 	/* Input parameters */
 	uint16_t qid;
+	uint16_t cqid; /* Not valid when SSO is enabled */
 	uint16_t bpf_id;
 	uint64_t aura_handle;
 	bool ipsech_ena;
@@ -412,6 +413,7 @@ struct roc_nix {
 	uint16_t max_sqb_count;
 	enum roc_nix_rss_reta_sz reta_sz;
 	bool enable_loop;
+	bool tx_compl_ena;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
 	uint16_t sqb_slack;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 60303329cc..405d9a8274 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -268,7 +268,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena)
@@ -395,7 +395,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena) {
@@ -644,9 +644,6 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 	if (cq == NULL)
 		return NIX_ERR_PARAM;
 
-	if (cq->qid >= nix->nb_rx_queues)
-		return NIX_ERR_QUEUE_INVALID_RANGE;
-
 	qsize = nix_qsize_clampup(cq->nb_desc);
 	cq->nb_desc = nix_qsize_to_val(qsize);
 	cq->qmask = cq->nb_desc - 1;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0603d73a90..4ed81c3d98 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -606,6 +606,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Setup ROC RQ */
 	rq = &dev->rqs[qid];
 	rq->qid = qid;
+	rq->cqid = cq->qid;
 	rq->aura_handle = mp->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
@@ -1168,6 +1169,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	if (roc_nix_is_lbk(nix))
 		nix->enable_loop = eth_dev->data->dev_conf.lpbk_mode;
 
+	nix->tx_compl_ena = 0;
+
 	/* Alloc a nix lf */
 	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
 	if (rc) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (22 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 26/31] net/cnxk: limit port specific SA table size Nithin Dabilpuram
                     ` (5 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Wait for CPT flow control on the WQE path. This avoids CPT
queue overflow and the resulting CPT misc interrupt.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 07c88a974e..f5205da0e2 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -233,6 +233,16 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 	}
 }
 
+static __rte_always_inline void
+cn10k_nix_sec_fc_wait_one(struct cn10k_eth_txq *txq)
+{
+	uint64_t nb_desc = txq->cpt_desc;
+	uint64_t *fc = txq->cpt_fc;
+
+	while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED))
+		;
+}
+
 static __rte_always_inline void
 cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 26/31] net/cnxk: limit port specific SA table size
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (23 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
                     ` (4 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Limit the port-specific SA table size to 1 entry when not used.
This is useful when the inline device is enabled, since the
port-specific SA table will then not be used for inline IPsec
inbound processing.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c         | 4 ++++
 drivers/net/cnxk/cnxk_ethdev.h         | 5 ++++-
 drivers/net/cnxk/cnxk_ethdev_devargs.c | 3 +--
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4ed81c3d98..89f8cc107d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -103,6 +103,10 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	int i, rc = 0;
 
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		/* Setup minimum SA table when inline device is used */
+		nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
+		nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
+
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4178cfeff..bed0e0eada 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -273,8 +273,11 @@ TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
 
 /* Inbound security data */
 struct cnxk_eth_dev_sec_inb {
+	/* IPSec inbound min SPI */
+	uint32_t min_spi;
+
 	/* IPSec inbound max SPI */
-	uint16_t max_spi;
+	uint32_t max_spi;
 
 	/* Using inbound with inline device */
 	bool inl_dev;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 4ded850622..d28509dbda 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -320,12 +320,11 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 null_devargs:
 	dev->scalar_ena = !!scalar_enable;
 	dev->inb.no_inl_dev = !!no_inl_dev;
+	dev->inb.min_spi = ipsec_in_min_spi;
 	dev->inb.max_spi = ipsec_in_max_spi;
 	dev->outb.max_sa = ipsec_out_max_sa;
 	dev->outb.nb_desc = outb_nb_desc;
 	dev->outb.nb_crypto_qs = outb_nb_crypto_qs;
-	dev->nix.ipsec_in_min_spi = ipsec_in_min_spi;
-	dev->nix.ipsec_in_max_spi = ipsec_in_max_spi;
 	dev->nix.ipsec_out_max_sa = ipsec_out_max_sa;
 	dev->nix.rss_tag_as_xor = !!rss_tag_as_xor;
 	dev->nix.max_sqb_count = sqb_count;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (24 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 26/31] net/cnxk: limit port specific SA table size Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5 Nithin Dabilpuram
                     ` (3 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add support for DES-CBC cipher for security offload in inline mode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  5 +++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 20 ++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index a44254931e..f25df54254 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -817,6 +817,7 @@ cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_CIPHER_AES_CTR:
 		ivlen = 8;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		ivlen = ROC_CPT_DES_BLOCK_LENGTH;
 		break;
@@ -898,6 +899,7 @@ cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_CIPHER_AES_CBC:
 		roundup_byte = 16;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		roundup_byte = 8;
 		break;
@@ -1033,6 +1035,9 @@ on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
 			case RTE_CRYPTO_CIPHER_NULL:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
 				break;
+			case RTE_CRYPTO_CIPHER_DES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_DES_CBC;
+				break;
 			case RTE_CRYPTO_CIPHER_3DES_CBC:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_3DES_CBC;
 				break;
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 9536a99c8e..aa9aa29b4a 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -30,6 +30,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, },
 		}, }
 	},
+	{	/* DES  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
 	{	/* 3DES CBC  */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (25 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 29/31] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
                     ` (2 subsequent siblings)
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add support for MD5 auth algo for security offload in inline mode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  4 ++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index f25df54254..55382d3129 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -852,6 +852,7 @@ cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_AUTH_NULL:
 		icv = 0;
 		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		icv = 12;
 		break;
@@ -1208,6 +1209,7 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
 	} else {
 		switch (ctl->auth_type) {
+		case ROC_IE_ON_SA_AUTH_MD5:
 		case ROC_IE_ON_SA_AUTH_SHA1:
 			template = &out_sa->sha1.template;
 			ctx_len = offsetof(struct roc_ie_on_outb_sa,
@@ -1306,6 +1308,7 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		case RTE_CRYPTO_AUTH_AES_GMAC:
 		case RTE_CRYPTO_AUTH_NULL:
 			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
 			break;
@@ -1354,6 +1357,7 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		switch (auth_xform->auth.algo) {
 		case RTE_CRYPTO_AUTH_NULL:
 			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
 			       auth_key_len);
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index aa9aa29b4a..90cb1d252d 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -185,6 +185,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 29/31] net/cnxk: enable esn and antireplay support
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (26 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5 Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 30/31] common/cnxk: dump device basic info to file Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 31/31] net/cnxk: dumps device private information Nithin Dabilpuram
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable ESN and anti-replay in IPsec capabilities.
Add support for the session update security API.
Fix the CPT command population for the ESN-enabled case.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 139 ++++++++++++++++++++++++++++-
 1 file changed, 137 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 90cb1d252d..6dc5ebed5d 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -296,8 +296,10 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
 			.options = {
-					.udp_encap = 1
+					.udp_encap = 1,
+					.esn = 1
 				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
@@ -312,7 +314,8 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 			.options = {
 					.udp_encap = 1,
-					.iv_gen_disable = 1
+					.iv_gen_disable = 1,
+					.esn = 1
 				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
@@ -375,6 +378,137 @@ outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
 	free(iv_str);
 }
 
+static int
+cn9k_eth_sec_session_update(void *device,
+			    struct rte_security_session *sess,
+			    struct rte_security_session_conf *conf)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_security_ipsec_xform *ipsec;
+	struct cn9k_outb_priv_data *outb_priv;
+	struct cnxk_ipsec_outb_rlens *rlens;
+	struct cn9k_sec_sess_priv sess_priv;
+	struct rte_crypto_sym_xform *crypto;
+	struct cnxk_eth_sec_sess *eth_sec;
+	struct roc_ie_on_outb_sa *outb_sa;
+	rte_spinlock_t *lock;
+	char tbuf[128] = {0};
+	const char *iv_str;
+	uint32_t sa_idx;
+	int ctx_len;
+	int rc = 0;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+		return -ENOTSUP;
+
+	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+		return -ENOTSUP;
+
+	if (rte_security_dynfield_register() < 0)
+		return -ENOTSUP;
+
+	ipsec = &conf->ipsec;
+	crypto = conf->crypto_xform;
+
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+		return -ENOTSUP;
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (!eth_sec)
+		return -ENOENT;
+
+	eth_sec->spi = conf->ipsec.spi;
+
+	lock = &dev->outb.lock;
+	rte_spinlock_lock(lock);
+
+	outb_sa = eth_sec->sa;
+	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
+	sa_idx = outb_priv->sa_idx;
+
+	/* Disable SA */
+	outb_sa->common_sa.ctl.valid = 0;
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	sess_priv.u64 = 0;
+	memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));
+
+	/* Fill outbound sa params */
+	rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
+	if (rc < 0) {
+		snprintf(tbuf, sizeof(tbuf),
+			 "Failed to init outbound sa, rc=%d", rc);
+		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+		goto exit;
+	}
+
+	ctx_len = rc;
+	rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, false,
+				   ctx_len);
+	if (rc) {
+		snprintf(tbuf, sizeof(tbuf),
+			 "Failed to init outbound sa, rc=%d", rc);
+		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+		goto exit;
+	}
+
+	/* When IV is provided by the application,
+	 * copy the IV to context and enable explicit IV flag in context.
+	 */
+	if (ipsec->options.iv_gen_disable == 1) {
+		outb_sa->common_sa.ctl.explicit_iv_en = 1;
+		iv_str = getenv("ETH_SEC_IV_OVR");
+		if (iv_str)
+			outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
+	}
+
+	outb_priv->userdata = conf->userdata;
+	outb_priv->eth_sec = eth_sec;
+	/* Start sequence number with 1 */
+	outb_priv->esn = ipsec->esn.value;
+
+	memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
+	if (outb_sa->common_sa.ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+		outb_priv->copy_salt = 1;
+
+	rlens = &outb_priv->rlens;
+	/* Save rlen info */
+	cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
+
+	sess_priv.sa_idx = outb_priv->sa_idx;
+	sess_priv.roundup_byte = rlens->roundup_byte;
+	sess_priv.roundup_len = rlens->roundup_len;
+	sess_priv.partial_len = rlens->partial_len;
+
+	/* Pointer from eth_sec -> outb_sa */
+	eth_sec->sa = outb_sa;
+	eth_sec->sess = sess;
+	eth_sec->sa_idx = sa_idx;
+	eth_sec->spi = ipsec->spi;
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	rte_spinlock_unlock(lock);
+
+	plt_nix_dbg("Created outbound session with spi=%u, sa_idx=%u",
+		    eth_sec->spi, eth_sec->sa_idx);
+
+	/* Update fast path info in priv area.
+	 */
+	set_sec_session_private_data(sess, (void *)sess_priv.u64);
+
+	return 0;
+exit:
+	rte_spinlock_unlock(lock);
+	if (rc)
+		plt_err("%s", tbuf);
+	return rc;
+}
+
 static int
 cn9k_eth_sec_session_create(void *device,
 			    struct rte_security_session_conf *conf,
@@ -678,6 +812,7 @@ cn9k_eth_sec_ops_override(void)
 
 	/* Update platform specific ops */
 	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
+	cnxk_eth_sec_ops.session_update = cn9k_eth_sec_session_update;
 	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
 	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 30/31] common/cnxk: dump device basic info to file
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (27 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 29/31] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  2022-09-05 13:32   ` [PATCH v2 31/31] net/cnxk: dumps device private information Nithin Dabilpuram
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add a helper API to collect device info for debug purposes.
This is used by the ethdev dump API to dump the ethdev's internal info.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_nix.h             |  12 +-
 drivers/common/cnxk/roc_nix_debug.c       | 726 +++++++++++-----------
 drivers/common/cnxk/roc_nix_inl.h         |   4 +-
 drivers/common/cnxk/roc_nix_inl_dev_irq.c |   6 +-
 drivers/common/cnxk/roc_nix_irq.c         |   6 +-
 drivers/common/cnxk/roc_nix_priv.h        |   2 +-
 drivers/common/cnxk/roc_nix_tm.c          |   4 +-
 7 files changed, 395 insertions(+), 365 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 8cea3232d0..5c2a869eba 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -493,13 +493,13 @@ int __roc_api roc_nix_rx_drop_re_set(struct roc_nix *roc_nix, bool ena);
 /* Debug */
 int __roc_api roc_nix_lf_get_reg_count(struct roc_nix *roc_nix);
 int __roc_api roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data);
-int __roc_api roc_nix_queues_ctx_dump(struct roc_nix *roc_nix);
+int __roc_api roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file);
 void __roc_api roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq);
-void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq);
-void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq);
-void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq);
-void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix);
-void __roc_api roc_nix_dump(struct roc_nix *roc_nix);
+void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file);
+void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file);
+void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file);
+void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file);
+void __roc_api roc_nix_dump(struct roc_nix *roc_nix, FILE *file);
 
 /* IRQ */
 void __roc_api roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix,
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index bd7a5d3dc2..6f82350b53 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -5,14 +5,27 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define nix_dump plt_dump
+
+#define nix_dump(file, fmt, ...) do {                                           \
+	if ((file) == NULL)							\
+		plt_dump(fmt, ##__VA_ARGS__);					\
+	else                                                                    \
+		fprintf(file, fmt "\n", ##__VA_ARGS__);                         \
+} while (0)
+
 #define NIX_REG_INFO(reg)                                                      \
 	{                                                                      \
 		reg, #reg                                                      \
 	}
 #define NIX_REG_NAME_SZ 48
 
-#define nix_dump_no_nl plt_dump_no_nl
+#define nix_dump_no_nl(file, fmt, ...) do {                                     \
+	if ((file) == NULL)                                                     \
+		plt_dump_no_nl(fmt, ##__VA_ARGS__);				\
+	else                                                                    \
+		fprintf(file, fmt, ##__VA_ARGS__);                              \
+} while (0)
+
 
 struct nix_lf_reg_info {
 	uint32_t offset;
@@ -45,7 +58,7 @@ static const struct nix_lf_reg_info nix_lf_reg[] = {
 };
 
 static void
-nix_bitmap_dump(struct plt_bitmap *bmp)
+nix_bitmap_dump(struct plt_bitmap *bmp, FILE *file)
 {
 	uint32_t pos = 0, start_pos;
 	uint64_t slab = 0;
@@ -57,7 +70,7 @@ nix_bitmap_dump(struct plt_bitmap *bmp)
 
 	start_pos = pos;
 
-	nix_dump_no_nl("  \t\t[");
+	nix_dump_no_nl(file, "  \t\t[");
 	do {
 		if (!slab)
 			break;
@@ -65,12 +78,12 @@ nix_bitmap_dump(struct plt_bitmap *bmp)
 
 		for (i = 0; i < 64; i++)
 			if (slab & (1ULL << i))
-				nix_dump_no_nl("%d, ", i);
+				nix_dump_no_nl(file, "%d, ", i);
 
 		if (!plt_bitmap_scan(bmp, &pos, &slab))
 			break;
 	} while (start_pos != pos);
-	nix_dump_no_nl(" ]");
+	nix_dump_no_nl(file, " ]");
 }
 
 int
@@ -114,6 +127,7 @@ roc_nix_lf_get_reg_count(struct roc_nix *roc_nix)
 int
 nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data)
 {
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 	uint32_t i;
@@ -123,7 +137,7 @@ nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data)
 	for (i = 0; i < PLT_DIM(nix_lf_reg); i++) {
 		reg = plt_read64(nix_lf_base + nix_lf_reg[i].offset);
 		if (dump_stdout && reg)
-			nix_dump("%32s = 0x%" PRIx64, nix_lf_reg[i].name, reg);
+			nix_dump(file, "%32s = 0x%" PRIx64, nix_lf_reg[i].name, reg);
 		if (data)
 			*data++ = reg;
 	}
@@ -136,6 +150,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 		     uint8_t lf_rx_stats)
 {
 	uint32_t i, count = 0;
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 
@@ -145,7 +160,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 	for (i = 0; i < lf_tx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_TX_STATX(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_TX_STATX", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_TX_STATX", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -156,7 +171,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 	for (i = 0; i < lf_rx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_RX_STATX(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_RX_STATX", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_RX_STATX", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -170,6 +185,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 		    uint16_t cints)
 {
 	uint32_t i, count = 0;
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 
@@ -179,7 +195,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_CNT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_CNT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_CNT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -190,7 +206,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_INT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_INT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_INT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -201,7 +217,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -212,7 +228,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1C",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1C",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -223,7 +239,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_CNT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_CNT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_CNT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -234,7 +250,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_WAIT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_WAIT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_WAIT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -245,7 +261,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -256,7 +272,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -267,7 +283,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -278,7 +294,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1C",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1C",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -368,296 +384,296 @@ nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p)
 }
 
 static inline void
-nix_cn9k_lf_sq_dump(__io struct nix_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
+nix_cn9k_lf_sq_dump(__io struct nix_sq_ctx_s *ctx, uint32_t *sqb_aura_p, FILE *file)
 {
-	nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
+	nix_dump(file, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
 		 ctx->sqe_way_mask, ctx->cq);
-	nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->sdp_mcast, ctx->substream);
-	nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
+	nix_dump(file, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
 		 ctx->ena);
 
-	nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
+	nix_dump(file, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
 		 ctx->sqb_count, ctx->default_chan);
-	nix_dump("W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d",
+	nix_dump(file, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d",
 		 ctx->smq_rr_quantum, ctx->sso_ena);
-	nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
+	nix_dump(file, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
 		 ctx->xoff, ctx->cq_ena, ctx->smq);
 
-	nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
+	nix_dump(file, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
 		 ctx->sqe_stype, ctx->sq_int_ena);
-	nix_dump("W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
+	nix_dump(file, "W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
 		 ctx->sqb_aura);
-	nix_dump("W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count);
+	nix_dump(file, "W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count);
 
-	nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
+	nix_dump(file, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
 		 ctx->smq_next_sq_vld, ctx->smq_pend);
-	nix_dump("W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
+	nix_dump(file, "W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
 		 ctx->smenq_next_sqb_vld, ctx->head_offset);
-	nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
+	nix_dump(file, "W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
 		 ctx->smenq_offset, ctx->tail_offset);
-	nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
+	nix_dump(file, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
 		 ctx->smq_lso_segnum, ctx->smq_next_sq);
-	nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
+	nix_dump(file, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
 		 ctx->lmt_dis);
-	nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
+	nix_dump(file, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
 		 ctx->cq_limit, ctx->max_sqe_size);
 
-	nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
-	nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
-	nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
-	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
-	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
+	nix_dump(file, "W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
+	nix_dump(file, "W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
+	nix_dump(file, "W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
+	nix_dump(file, "W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
+	nix_dump(file, "W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
+	nix_dump(file, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
 		 ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
-	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
+	nix_dump(file, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
+	nix_dump(file, "W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
 		 ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
-	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
+	nix_dump(file, "W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
+	nix_dump(file, "W10: scm_lso_rem \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->scm_lso_rem);
-	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
+	nix_dump(file, "W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W14: dropped_octs \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
+	nix_dump(file, "W15: dropped_pkts \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
 
 static inline void
-nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
+nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p, FILE *file)
 {
-	nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
+	nix_dump(file, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
 		 ctx->sqe_way_mask, ctx->cq);
-	nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->sdp_mcast, ctx->substream);
-	nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
+	nix_dump(file, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
 		 ctx->ena);
 
-	nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
+	nix_dump(file, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
 		 ctx->sqb_count, ctx->default_chan);
-	nix_dump("W1: smq_rr_weight \t\t%d\nW1: sso_ena \t\t\t%d",
+	nix_dump(file, "W1: smq_rr_weight \t\t%d\nW1: sso_ena \t\t\t%d",
 		 ctx->smq_rr_weight, ctx->sso_ena);
-	nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
+	nix_dump(file, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
 		 ctx->xoff, ctx->cq_ena, ctx->smq);
 
-	nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
+	nix_dump(file, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
 		 ctx->sqe_stype, ctx->sq_int_ena);
-	nix_dump("W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
+	nix_dump(file, "W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
 		 ctx->sqb_aura);
-	nix_dump("W2: smq_rr_count[ub:lb] \t\t%x:%x\n", ctx->smq_rr_count_ub,
+	nix_dump(file, "W2: smq_rr_count[ub:lb] \t\t%x:%x\n", ctx->smq_rr_count_ub,
 		 ctx->smq_rr_count_lb);
 
-	nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
+	nix_dump(file, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
 		 ctx->smq_next_sq_vld, ctx->smq_pend);
-	nix_dump("W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
+	nix_dump(file, "W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
 		 ctx->smenq_next_sqb_vld, ctx->head_offset);
-	nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
+	nix_dump(file, "W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
 		 ctx->smenq_offset, ctx->tail_offset);
-	nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
+	nix_dump(file, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
 		 ctx->smq_lso_segnum, ctx->smq_next_sq);
-	nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
+	nix_dump(file, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
 		 ctx->lmt_dis);
-	nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
+	nix_dump(file, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
 		 ctx->cq_limit, ctx->max_sqe_size);
 
-	nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
-	nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
-	nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
-	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
-	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
+	nix_dump(file, "W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
+	nix_dump(file, "W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
+	nix_dump(file, "W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
+	nix_dump(file, "W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
+	nix_dump(file, "W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
+	nix_dump(file, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
 		 ctx->vfi_lso_vlan1_ins_ena);
-	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
+	nix_dump(file, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
+	nix_dump(file, "W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
 		 ctx->vfi_lso_sizem1);
-	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
+	nix_dump(file, "W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
-	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
-	nix_dump("W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
+	nix_dump(file, "W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
+	nix_dump(file, "W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
+	nix_dump(file, "W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
 
 static inline void
-nix_cn9k_lf_rq_dump(__io struct nix_rq_ctx_s *ctx)
+nix_cn9k_lf_rq_dump(__io struct nix_rq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->wqe_aura, ctx->substream);
-	nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
+	nix_dump(file, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
 		 ctx->ena_wqwd);
-	nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
+	nix_dump(file, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
 		 ctx->ipsech_ena, ctx->sso_ena);
-	nix_dump("W0: ena \t\t\t%d\n", ctx->ena);
+	nix_dump(file, "W0: ena \t\t\t%d\n", ctx->ena);
 
-	nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
+	nix_dump(file, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
 		 ctx->lpb_drop_ena, ctx->spb_drop_ena);
-	nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
+	nix_dump(file, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
 		 ctx->xqe_drop_ena, ctx->wqe_caching);
-	nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
+	nix_dump(file, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
 		 ctx->pb_caching, ctx->sso_tt);
-	nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
+	nix_dump(file, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
 		 ctx->lpb_aura);
-	nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
+	nix_dump(file, "W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
 
-	nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
+	nix_dump(file, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
 		 ctx->xqe_hdr_split, ctx->xqe_imm_copy);
-	nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
+	nix_dump(file, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
 		 ctx->xqe_imm_size, ctx->later_skip);
-	nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
+	nix_dump(file, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
 		 ctx->first_skip, ctx->lpb_sizem1);
-	nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
+	nix_dump(file, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
 		 ctx->wqe_skip);
-	nix_dump("W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1);
+	nix_dump(file, "W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1);
 
-	nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
+	nix_dump(file, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
 		 ctx->spb_pool_pass, ctx->spb_pool_drop);
-	nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
+	nix_dump(file, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
 		 ctx->spb_aura_pass, ctx->spb_aura_drop);
-	nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
+	nix_dump(file, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
 		 ctx->wqe_pool_pass, ctx->wqe_pool_drop);
-	nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
+	nix_dump(file, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
 		 ctx->xqe_pass, ctx->xqe_drop);
 
-	nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
+	nix_dump(file, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
 		 ctx->qint_idx, ctx->rq_int_ena);
-	nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
+	nix_dump(file, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
 		 ctx->lpb_pool_pass);
-	nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
+	nix_dump(file, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
 		 ctx->lpb_pool_drop, ctx->lpb_aura_pass);
-	nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
+	nix_dump(file, "W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
 
-	nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
+	nix_dump(file, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
 		 ctx->flow_tagw, ctx->bad_utag);
-	nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
+	nix_dump(file, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
 		 ctx->ltag);
 
-	nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
-	nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
+	nix_dump(file, "W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
 }
 
 void
-nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx)
+nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: wqe_aura \t\t\t%d\nW0: len_ol3_dis \t\t\t%d",
+	nix_dump(file, "W0: wqe_aura \t\t\t%d\nW0: len_ol3_dis \t\t\t%d",
 		 ctx->wqe_aura, ctx->len_ol3_dis);
-	nix_dump("W0: len_ol4_dis \t\t\t%d\nW0: len_il3_dis \t\t\t%d",
+	nix_dump(file, "W0: len_ol4_dis \t\t\t%d\nW0: len_il3_dis \t\t\t%d",
 		 ctx->len_ol4_dis, ctx->len_il3_dis);
-	nix_dump("W0: len_il4_dis \t\t\t%d\nW0: csum_ol4_dis \t\t\t%d",
+	nix_dump(file, "W0: len_il4_dis \t\t\t%d\nW0: csum_ol4_dis \t\t\t%d",
 		 ctx->len_il4_dis, ctx->csum_ol4_dis);
-	nix_dump("W0: csum_ol3_dis \t\t\t%d\nW0: lenerr_dis \t\t\t%d",
+	nix_dump(file, "W0: csum_ol3_dis \t\t\t%d\nW0: lenerr_dis \t\t\t%d",
 		 ctx->csum_ol4_dis, ctx->lenerr_dis);
-	nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
+	nix_dump(file, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
 		 ctx->ena_wqwd);
-	nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
+	nix_dump(file, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
 		 ctx->ipsech_ena, ctx->sso_ena);
-	nix_dump("W0: ena \t\t\t%d\n", ctx->ena);
+	nix_dump(file, "W0: ena \t\t\t%d\n", ctx->ena);
 
-	nix_dump("W1: chi_ena \t\t%d\nW1: ipsecd_drop_en \t\t%d", ctx->chi_ena,
+	nix_dump(file, "W1: chi_ena \t\t%d\nW1: ipsecd_drop_en \t\t%d", ctx->chi_ena,
 		 ctx->ipsecd_drop_en);
-	nix_dump("W1: pb_stashing \t\t\t%d", ctx->pb_stashing);
-	nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
+	nix_dump(file, "W1: pb_stashing \t\t\t%d", ctx->pb_stashing);
+	nix_dump(file, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
 		 ctx->lpb_drop_ena, ctx->spb_drop_ena);
-	nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
+	nix_dump(file, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
 		 ctx->xqe_drop_ena, ctx->wqe_caching);
-	nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
+	nix_dump(file, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
 		 ctx->pb_caching, ctx->sso_tt);
-	nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
+	nix_dump(file, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
 		 ctx->lpb_aura);
-	nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
+	nix_dump(file, "W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
 
-	nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
+	nix_dump(file, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
 		 ctx->xqe_hdr_split, ctx->xqe_imm_copy);
-	nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
+	nix_dump(file, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
 		 ctx->xqe_imm_size, ctx->later_skip);
-	nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
+	nix_dump(file, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
 		 ctx->first_skip, ctx->lpb_sizem1);
-	nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
+	nix_dump(file, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
 		 ctx->wqe_skip);
-	nix_dump("W2: spb_sizem1 \t\t\t%d\nW2: policer_ena \t\t\t%d",
+	nix_dump(file, "W2: spb_sizem1 \t\t\t%d\nW2: policer_ena \t\t\t%d",
 		 ctx->spb_sizem1, ctx->policer_ena);
-	nix_dump("W2: band_prof_id \t\t\t%d", ctx->band_prof_id);
+	nix_dump(file, "W2: band_prof_id \t\t\t%d", ctx->band_prof_id);
 
-	nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
+	nix_dump(file, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
 		 ctx->spb_pool_pass, ctx->spb_pool_drop);
-	nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
+	nix_dump(file, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
 		 ctx->spb_aura_pass, ctx->spb_aura_drop);
-	nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
+	nix_dump(file, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
 		 ctx->wqe_pool_pass, ctx->wqe_pool_drop);
-	nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
+	nix_dump(file, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
 		 ctx->xqe_pass, ctx->xqe_drop);
 
-	nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
+	nix_dump(file, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
 		 ctx->qint_idx, ctx->rq_int_ena);
-	nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
+	nix_dump(file, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
 		 ctx->lpb_pool_pass);
-	nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
+	nix_dump(file, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
 		 ctx->lpb_pool_drop, ctx->lpb_aura_pass);
-	nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
+	nix_dump(file, "W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
 
-	nix_dump("W5: vwqe_skip \t\t\t%d\nW5: max_vsize_exp \t\t\t%d",
+	nix_dump(file, "W5: vwqe_skip \t\t\t%d\nW5: max_vsize_exp \t\t\t%d",
 		 ctx->vwqe_skip, ctx->max_vsize_exp);
-	nix_dump("W5: vtime_wait \t\t\t%d\nW5: vwqe_ena \t\t\t%d",
+	nix_dump(file, "W5: vtime_wait \t\t\t%d\nW5: vwqe_ena \t\t\t%d",
 		 ctx->vtime_wait, ctx->max_vsize_exp);
-	nix_dump("W5: ipsec_vwqe \t\t\t%d", ctx->ipsec_vwqe);
-	nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
+	nix_dump(file, "W5: ipsec_vwqe \t\t\t%d", ctx->ipsec_vwqe);
+	nix_dump(file, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
 		 ctx->flow_tagw, ctx->bad_utag);
-	nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
+	nix_dump(file, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
 		 ctx->ltag);
 
-	nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
-	nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
+	nix_dump(file, "W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
 }
 
 static inline void
-nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx)
+nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);
+	nix_dump(file, "W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);
 
-	nix_dump("W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr);
-	nix_dump("W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d", ctx->avg_con,
+	nix_dump(file, "W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr);
+	nix_dump(file, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d", ctx->avg_con,
 		 ctx->cint_idx);
-	nix_dump("W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d", ctx->cq_err,
+	nix_dump(file, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d", ctx->cq_err,
 		 ctx->qint_idx);
-	nix_dump("W1: bpid  \t\t\t%d\nW1: bp_ena \t\t\t%d\n", ctx->bpid,
+	nix_dump(file, "W1: bpid  \t\t\t%d\nW1: bp_ena \t\t\t%d\n", ctx->bpid,
 		 ctx->bp_ena);
 
-	nix_dump("W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
+	nix_dump(file, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
 		 ctx->update_time, ctx->avg_level);
-	nix_dump("W2: head \t\t\t%d\nW2: tail \t\t\t%d\n", ctx->head,
+	nix_dump(file, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n", ctx->head,
 		 ctx->tail);
 
-	nix_dump("W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d",
+	nix_dump(file, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d",
 		 ctx->cq_err_int_ena, ctx->cq_err_int);
-	nix_dump("W3: qsize \t\t\t%d\nW3: caching \t\t\t%d", ctx->qsize,
+	nix_dump(file, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d", ctx->qsize,
 		 ctx->caching);
-	nix_dump("W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d", ctx->substream,
+	nix_dump(file, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d", ctx->substream,
 		 ctx->ena);
-	nix_dump("W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d", ctx->drop_ena,
+	nix_dump(file, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d", ctx->drop_ena,
 		 ctx->drop);
-	nix_dump("W3: bp \t\t\t\t%d\n", ctx->bp);
+	nix_dump(file, "W3: bp \t\t\t\t%d\n", ctx->bp);
 }
 
 int
-roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
+roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	int rc = -1, q, rq = nix->nb_rx_queues;
@@ -679,9 +695,9 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get cq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d cq=%d ===============",
+		nix_dump(file, "============== port=%d cq=%d ===============",
 			 roc_nix->port_id, q);
-		nix_lf_cq_dump(ctx);
+		nix_lf_cq_dump(ctx, file);
 	}
 
 	for (q = 0; q < rq; q++) {
@@ -690,12 +706,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get rq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d rq=%d ===============",
+		nix_dump(file, "============== port=%d rq=%d ===============",
 			 roc_nix->port_id, q);
 		if (roc_model_is_cn9k())
-			nix_cn9k_lf_rq_dump(ctx);
+			nix_cn9k_lf_rq_dump(ctx, file);
 		else
-			nix_lf_rq_dump(ctx);
+			nix_lf_rq_dump(ctx, file);
 	}
 
 	for (q = 0; q < sq; q++) {
@@ -704,12 +720,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get sq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d sq=%d ===============",
+		nix_dump(file, "============== port=%d sq=%d ===============",
 			 roc_nix->port_id, q);
 		if (roc_model_is_cn9k())
-			nix_cn9k_lf_sq_dump(ctx, &sqb_aura);
+			nix_cn9k_lf_sq_dump(ctx, &sqb_aura, file);
 		else
-			nix_lf_sq_dump(ctx, &sqb_aura);
+			nix_lf_sq_dump(ctx, &sqb_aura, file);
 
 		if (!npa_lf) {
 			plt_err("NPA LF does not exist");
@@ -730,15 +746,15 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			continue;
 		}
 
-		nix_dump("\nSQB Aura W0: Pool addr\t\t0x%" PRIx64 "",
+		nix_dump(file, "\nSQB Aura W0: Pool addr\t\t0x%" PRIx64 "",
 			 npa_rsp->aura.pool_addr);
-		nix_dump("SQB Aura W1: ena\t\t\t%d", npa_rsp->aura.ena);
-		nix_dump("SQB Aura W2: count\t\t%" PRIx64 "",
+		nix_dump(file, "SQB Aura W1: ena\t\t\t%d", npa_rsp->aura.ena);
+		nix_dump(file, "SQB Aura W2: count\t\t%" PRIx64 "",
 			 (uint64_t)npa_rsp->aura.count);
-		nix_dump("SQB Aura W3: limit\t\t%" PRIx64 "",
+		nix_dump(file, "SQB Aura W3: limit\t\t%" PRIx64 "",
 			 (uint64_t)npa_rsp->aura.limit);
-		nix_dump("SQB Aura W3: fc_ena\t\t%d", npa_rsp->aura.fc_ena);
-		nix_dump("SQB Aura W4: fc_addr\t\t0x%" PRIx64 "\n",
+		nix_dump(file, "SQB Aura W3: fc_ena\t\t%d", npa_rsp->aura.fc_ena);
+		nix_dump(file, "SQB Aura W4: fc_addr\t\t0x%" PRIx64 "\n",
 			 npa_rsp->aura.fc_addr);
 	}
 
@@ -750,120 +766,122 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 void
 roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq)
 {
+	FILE *file = NULL;
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
 	const uint64_t *sgs = (const uint64_t *)(rx + 1);
 	int i;
 
-	nix_dump("tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
+	nix_dump(file, "tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
 		 cq->tag, cq->q, cq->node, cq->cqe_type);
 
-	nix_dump("W0: chan \t0x%x\t\tdesc_sizem1 \t%d", rx->chan,
+	nix_dump(file, "W0: chan \t0x%x\t\tdesc_sizem1 \t%d", rx->chan,
 		 rx->desc_sizem1);
-	nix_dump("W0: imm_copy \t%d\t\texpress \t%d", rx->imm_copy,
+	nix_dump(file, "W0: imm_copy \t%d\t\texpress \t%d", rx->imm_copy,
 		 rx->express);
-	nix_dump("W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d", rx->wqwd,
+	nix_dump(file, "W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d", rx->wqwd,
 		 rx->errlev, rx->errcode);
-	nix_dump("W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d",
+	nix_dump(file, "W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d",
 		 rx->latype, rx->lbtype, rx->lctype);
-	nix_dump("W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d",
+	nix_dump(file, "W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d",
 		 rx->ldtype, rx->letype, rx->lftype);
-	nix_dump("W0: lgtype \t%d \t\tlhtype \t\t%d", rx->lgtype, rx->lhtype);
+	nix_dump(file, "W0: lgtype \t%d \t\tlhtype \t\t%d", rx->lgtype, rx->lhtype);
 
-	nix_dump("W1: pkt_lenm1 \t%d", rx->pkt_lenm1);
-	nix_dump("W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d",
+	nix_dump(file, "W1: pkt_lenm1 \t%d", rx->pkt_lenm1);
+	nix_dump(file, "W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d",
 		 rx->l2m, rx->l2b, rx->l3m, rx->l3b);
-	nix_dump("W1: vtag0_valid %d\t\tvtag0_gone \t%d", rx->vtag0_valid,
+	nix_dump(file, "W1: vtag0_valid %d\t\tvtag0_gone \t%d", rx->vtag0_valid,
 		 rx->vtag0_gone);
-	nix_dump("W1: vtag1_valid %d\t\tvtag1_gone \t%d", rx->vtag1_valid,
+	nix_dump(file, "W1: vtag1_valid %d\t\tvtag1_gone \t%d", rx->vtag1_valid,
 		 rx->vtag1_gone);
-	nix_dump("W1: pkind \t%d", rx->pkind);
-	nix_dump("W1: vtag0_tci \t%d\t\tvtag1_tci \t%d", rx->vtag0_tci,
+	nix_dump(file, "W1: pkind \t%d", rx->pkind);
+	nix_dump(file, "W1: vtag0_tci \t%d\t\tvtag1_tci \t%d", rx->vtag0_tci,
 		 rx->vtag1_tci);
 
-	nix_dump("W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d",
+	nix_dump(file, "W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d",
 		 rx->laflags, rx->lbflags, rx->lcflags);
-	nix_dump("W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d",
+	nix_dump(file, "W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d",
 		 rx->ldflags, rx->leflags, rx->lfflags);
-	nix_dump("W2: lgflags \t%d\t\tlhflags \t%d", rx->lgflags, rx->lhflags);
+	nix_dump(file, "W2: lgflags \t%d\t\tlhflags \t%d", rx->lgflags, rx->lhflags);
 
-	nix_dump("W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d",
+	nix_dump(file, "W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d",
 		 rx->eoh_ptr, rx->wqe_aura, rx->pb_aura);
-	nix_dump("W3: match_id \t%d", rx->match_id);
+	nix_dump(file, "W3: match_id \t%d", rx->match_id);
 
-	nix_dump("W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d", rx->laptr,
+	nix_dump(file, "W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d", rx->laptr,
 		 rx->lbptr, rx->lcptr);
-	nix_dump("W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d", rx->ldptr,
+	nix_dump(file, "W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d", rx->ldptr,
 		 rx->leptr, rx->lfptr);
-	nix_dump("W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr);
+	nix_dump(file, "W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr);
 
-	nix_dump("W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
+	nix_dump(file, "W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
 		 rx->vtag0_ptr, rx->vtag1_ptr, rx->flow_key_alg);
 
 	for (i = 0; i < (rx->desc_sizem1 + 1) << 1; i++)
-		nix_dump("sg[%u] = %p", i, (void *)sgs[i]);
+		nix_dump(file, "sg[%u] = %p", i, (void *)sgs[i]);
 }
 
 void
-roc_nix_rq_dump(struct roc_nix_rq *rq)
+roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file)
 {
-	nix_dump("nix_rq@%p", rq);
-	nix_dump("  qid = %d", rq->qid);
-	nix_dump("  aura_handle = 0x%" PRIx64 "", rq->aura_handle);
-	nix_dump("  ipsec_ena = %d", rq->ipsech_ena);
-	nix_dump("  first_skip = %d", rq->first_skip);
-	nix_dump("  later_skip = %d", rq->later_skip);
-	nix_dump("  lpb_size = %d", rq->lpb_size);
-	nix_dump("  sso_ena = %d", rq->sso_ena);
-	nix_dump("  tag_mask = %d", rq->tag_mask);
-	nix_dump("  flow_tag_width = %d", rq->flow_tag_width);
-	nix_dump("  tt = %d", rq->tt);
-	nix_dump("  hwgrp = %d", rq->hwgrp);
-	nix_dump("  vwqe_ena = %d", rq->vwqe_ena);
-	nix_dump("  vwqe_first_skip = %d", rq->vwqe_first_skip);
-	nix_dump("  vwqe_max_sz_exp = %d", rq->vwqe_max_sz_exp);
-	nix_dump("  vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
-	nix_dump("  vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
-	nix_dump("  roc_nix = %p", rq->roc_nix);
-	nix_dump("  inl_dev_refs = %d", rq->inl_dev_refs);
+	nix_dump(file, "nix_rq@%p", rq);
+	nix_dump(file, "  qid = %d", rq->qid);
+	nix_dump(file, "  aura_handle = 0x%" PRIx64 "", rq->aura_handle);
+	nix_dump(file, "  ipsec_ena = %d", rq->ipsech_ena);
+	nix_dump(file, "  first_skip = %d", rq->first_skip);
+	nix_dump(file, "  later_skip = %d", rq->later_skip);
+	nix_dump(file, "  lpb_size = %d", rq->lpb_size);
+	nix_dump(file, "  sso_ena = %d", rq->sso_ena);
+	nix_dump(file, "  tag_mask = %d", rq->tag_mask);
+	nix_dump(file, "  flow_tag_width = %d", rq->flow_tag_width);
+	nix_dump(file, "  tt = %d", rq->tt);
+	nix_dump(file, "  hwgrp = %d", rq->hwgrp);
+	nix_dump(file, "  vwqe_ena = %d", rq->vwqe_ena);
+	nix_dump(file, "  vwqe_first_skip = %d", rq->vwqe_first_skip);
+	nix_dump(file, "  vwqe_max_sz_exp = %d", rq->vwqe_max_sz_exp);
+	nix_dump(file, "  vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
+	nix_dump(file, "  vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
+	nix_dump(file, "  roc_nix = %p", rq->roc_nix);
+	nix_dump(file, "  inl_dev_refs = %d", rq->inl_dev_refs);
 }
 
 void
-roc_nix_cq_dump(struct roc_nix_cq *cq)
+roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file)
 {
-	nix_dump("nix_cq@%p", cq);
-	nix_dump("  qid = %d", cq->qid);
-	nix_dump("  qnb_desc = %d", cq->nb_desc);
-	nix_dump("  roc_nix = %p", cq->roc_nix);
-	nix_dump("  door = 0x%" PRIx64 "", cq->door);
-	nix_dump("  status = %p", cq->status);
-	nix_dump("  wdata = 0x%" PRIx64 "", cq->wdata);
-	nix_dump("  desc_base = %p", cq->desc_base);
-	nix_dump("  qmask = 0x%" PRIx32 "", cq->qmask);
+	nix_dump(file, "nix_cq@%p", cq);
+	nix_dump(file, "  qid = %d", cq->qid);
+	nix_dump(file, "  qnb_desc = %d", cq->nb_desc);
+	nix_dump(file, "  roc_nix = %p", cq->roc_nix);
+	nix_dump(file, "  door = 0x%" PRIx64 "", cq->door);
+	nix_dump(file, "  status = %p", cq->status);
+	nix_dump(file, "  wdata = 0x%" PRIx64 "", cq->wdata);
+	nix_dump(file, "  desc_base = %p", cq->desc_base);
+	nix_dump(file, "  qmask = 0x%" PRIx32 "", cq->qmask);
 }
 
 void
-roc_nix_sq_dump(struct roc_nix_sq *sq)
+roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file)
 {
-	nix_dump("nix_sq@%p", sq);
-	nix_dump("  qid = %d", sq->qid);
-	nix_dump("  max_sqe_sz = %d", sq->max_sqe_sz);
-	nix_dump("  nb_desc = %d", sq->nb_desc);
-	nix_dump("  sqes_per_sqb_log2 = %d", sq->sqes_per_sqb_log2);
-	nix_dump("  roc_nix= %p", sq->roc_nix);
-	nix_dump("  aura_handle = 0x%" PRIx64 "", sq->aura_handle);
-	nix_dump("  nb_sqb_bufs_adj = %d", sq->nb_sqb_bufs_adj);
-	nix_dump("  nb_sqb_bufs = %d", sq->nb_sqb_bufs);
-	nix_dump("  io_addr = 0x%" PRIx64 "", sq->io_addr);
-	nix_dump("  lmt_addr = %p", sq->lmt_addr);
-	nix_dump("  sqe_mem = %p", sq->sqe_mem);
-	nix_dump("  fc = %p", sq->fc);
+	nix_dump(file, "nix_sq@%p", sq);
+	nix_dump(file, "  qid = %d", sq->qid);
+	nix_dump(file, "  max_sqe_sz = %d", sq->max_sqe_sz);
+	nix_dump(file, "  nb_desc = %d", sq->nb_desc);
+	nix_dump(file, "  sqes_per_sqb_log2 = %d", sq->sqes_per_sqb_log2);
+	nix_dump(file, "  roc_nix= %p", sq->roc_nix);
+	nix_dump(file, "  aura_handle = 0x%" PRIx64 "", sq->aura_handle);
+	nix_dump(file, "  nb_sqb_bufs_adj = %d", sq->nb_sqb_bufs_adj);
+	nix_dump(file, "  nb_sqb_bufs = %d", sq->nb_sqb_bufs);
+	nix_dump(file, "  io_addr = 0x%" PRIx64 "", sq->io_addr);
+	nix_dump(file, "  lmt_addr = %p", sq->lmt_addr);
+	nix_dump(file, "  sqe_mem = %p", sq->sqe_mem);
+	nix_dump(file, "  fc = %p", sq->fc);
 };
 
 static uint8_t
 nix_tm_reg_dump_prep(uint16_t hw_lvl, uint16_t schq, uint16_t link,
 		     uint64_t *reg, char regstr[][NIX_REG_NAME_SZ])
 {
+	FILE *file = NULL;
 	uint8_t k = 0;
 
 	switch (hw_lvl) {
@@ -1022,7 +1040,7 @@ nix_tm_reg_dump_prep(uint16_t hw_lvl, uint16_t schq, uint16_t link,
 	}
 
 	if (k > MAX_REGS_PER_MBOX_MSG) {
-		nix_dump("\t!!!NIX TM Registers request overflow!!!");
+		nix_dump(file, "\t!!!NIX TM Registers request overflow!!!");
 		return 0;
 	}
 	return k;
@@ -1040,6 +1058,7 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 	struct nix_tm_node *root = NULL;
 	uint32_t schq, parent_schq;
 	bool found = false;
+	FILE *file = NULL;
 	uint8_t j, k, rc;
 
 	TAILQ_FOREACH(node, list, node) {
@@ -1067,7 +1086,7 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 			parent_lvlstr = nix_tm_hwlvl2str(node->hw_lvl + 1);
 		}
 
-		nix_dump("\t(%p%s) %s_%d->%s_%d", node,
+		nix_dump(file, "\t(%p%s) %s_%d->%s_%d", node,
 			 node->child_realloc ? "[CR]" : "", lvlstr, schq,
 			 parent_lvlstr, parent_schq);
 
@@ -1092,15 +1111,15 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 		rc = mbox_process_msg(mbox, (void **)&rsp);
 		if (!rc) {
 			for (j = 0; j < k; j++)
-				nix_dump("\t\t%s=0x%016" PRIx64, regstr[j],
+				nix_dump(file, "\t\t%s=0x%016" PRIx64, regstr[j],
 					 rsp->regval[j]);
 		} else {
-			nix_dump("\t!!!Failed to dump registers!!!");
+			nix_dump(file, "\t!!!Failed to dump registers!!!");
 		}
 	}
 
 	if (found)
-		nix_dump("\n");
+		nix_dump(file, "\n");
 
 	/* Dump TL1 node data when root level is TL2 */
 	if (root && root->hw_lvl == NIX_TXSCH_LVL_TL2) {
@@ -1117,171 +1136,182 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 		rc = mbox_process_msg(mbox, (void **)&rsp);
 		if (!rc) {
 			for (j = 0; j < k; j++)
-				nix_dump("\t\t%s=0x%016" PRIx64, regstr[j],
+				nix_dump(file, "\t\t%s=0x%016" PRIx64, regstr[j],
 					 rsp->regval[j]);
 		} else {
-			nix_dump("\t!!!Failed to dump registers!!!");
+			nix_dump(file, "\t!!!Failed to dump registers!!!");
 		}
-		nix_dump("\n");
+		nix_dump(file, "\n");
 	}
 }
 
 void
-roc_nix_tm_dump(struct roc_nix *roc_nix)
+roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct dev *dev = &nix->dev;
 	uint8_t hw_lvl, i;
 
-	nix_dump("===TM hierarchy and registers dump of %s (pf:vf) (%d:%d)===",
+	nix_dump(file, "===TM hierarchy and registers dump of %s (pf:vf) (%d:%d)===",
 		 nix->pci_dev->name, dev_get_pf(dev->pf_func),
 		 dev_get_vf(dev->pf_func));
 
 	/* Dump all trees */
 	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++) {
-		nix_dump("\tTM %s:", nix_tm_tree2str(i));
+		nix_dump(file, "\tTM %s:", nix_tm_tree2str(i));
 		for (hw_lvl = 0; hw_lvl <= NIX_TXSCH_LVL_CNT; hw_lvl++)
 			nix_tm_dump_lvl(nix, &nix->trees[i], hw_lvl);
 	}
 
 	/* Dump unused resources */
-	nix_dump("\tTM unused resources:");
+	nix_dump(file, "\tTM unused resources:");
 	hw_lvl = NIX_TXSCH_LVL_SMQ;
 	for (; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
-		nix_dump("\t\ttxschq        %7s num = %d",
+		nix_dump(file, "\t\ttxschq        %7s num = %d",
 			 nix_tm_hwlvl2str(hw_lvl),
 			 nix_tm_resource_avail(nix, hw_lvl, false));
 
-		nix_bitmap_dump(nix->schq_bmp[hw_lvl]);
-		nix_dump("\n");
+		nix_bitmap_dump(nix->schq_bmp[hw_lvl], file);
+		nix_dump(file, "\n");
 
-		nix_dump("\t\ttxschq_contig %7s num = %d",
+		nix_dump(file, "\t\ttxschq_contig %7s num = %d",
 			 nix_tm_hwlvl2str(hw_lvl),
 			 nix_tm_resource_avail(nix, hw_lvl, true));
-		nix_bitmap_dump(nix->schq_contig_bmp[hw_lvl]);
-		nix_dump("\n");
+		nix_bitmap_dump(nix->schq_contig_bmp[hw_lvl], file);
+		nix_dump(file, "\n");
 	}
 }
 
 void
-roc_nix_dump(struct roc_nix *roc_nix)
+roc_nix_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct dev *dev = &nix->dev;
 	int i;
 
-	nix_dump("nix@%p", nix);
-	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
-	nix_dump("  vf = %d", dev_get_vf(dev->pf_func));
-	nix_dump("  bar2 = 0x%" PRIx64, dev->bar2);
-	nix_dump("  bar4 = 0x%" PRIx64, dev->bar4);
-	nix_dump("  port_id = %d", roc_nix->port_id);
-	nix_dump("  rss_tag_as_xor = %d", roc_nix->rss_tag_as_xor);
-	nix_dump("  rss_tag_as_xor = %d", roc_nix->max_sqb_count);
-	nix_dump("  outb_nb_desc = %u", roc_nix->outb_nb_desc);
+	nix_dump(file, "nix@%p", nix);
+	nix_dump(file, "  pf = %d", dev_get_pf(dev->pf_func));
+	nix_dump(file, "  vf = %d", dev_get_vf(dev->pf_func));
+	nix_dump(file, "  bar2 = 0x%" PRIx64, dev->bar2);
+	nix_dump(file, "  bar4 = 0x%" PRIx64, dev->bar4);
+	nix_dump(file, "  port_id = %d", roc_nix->port_id);
+	nix_dump(file, "  rss_tag_as_xor = %d", roc_nix->rss_tag_as_xor);
+	nix_dump(file, "  rss_tag_as_xor = %d", roc_nix->max_sqb_count);
+	nix_dump(file, "  outb_nb_desc = %u", roc_nix->outb_nb_desc);
 
-	nix_dump("  \tpci_dev = %p", nix->pci_dev);
-	nix_dump("  \tbase = 0x%" PRIxPTR "", nix->base);
-	nix_dump("  \tlmt_base = 0x%" PRIxPTR "", nix->lmt_base);
-	nix_dump("  \treta_size = %d", nix->reta_sz);
-	nix_dump("  \ttx_chan_base = %d", nix->tx_chan_base);
-	nix_dump("  \trx_chan_base = %d", nix->rx_chan_base);
-	nix_dump("  \tnb_rx_queues = %d", nix->nb_rx_queues);
-	nix_dump("  \tnb_tx_queues = %d", nix->nb_tx_queues);
-	nix_dump("  \tlso_tsov6_idx = %d", nix->lso_tsov6_idx);
-	nix_dump("  \tlso_tsov4_idx = %d", nix->lso_tsov4_idx);
-	nix_dump("  \tlso_udp_tun_v4v4 = %d",
+	nix_dump(file, "  \tpci_dev = %p", nix->pci_dev);
+	nix_dump(file, "  \tbase = 0x%" PRIxPTR "", nix->base);
+	nix_dump(file, "  \tlmt_base = 0x%" PRIxPTR "", nix->lmt_base);
+	nix_dump(file, "  \treta_size = %d", nix->reta_sz);
+	nix_dump(file, "  \ttx_chan_base = %d", nix->tx_chan_base);
+	nix_dump(file, "  \trx_chan_base = %d", nix->rx_chan_base);
+	nix_dump(file, "  \tnb_rx_queues = %d", nix->nb_rx_queues);
+	nix_dump(file, "  \tnb_tx_queues = %d", nix->nb_tx_queues);
+	nix_dump(file, "  \tlso_tsov6_idx = %d", nix->lso_tsov6_idx);
+	nix_dump(file, "  \tlso_tsov4_idx = %d", nix->lso_tsov4_idx);
+	nix_dump(file, "  \tlso_udp_tun_v4v4 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V4]);
-	nix_dump("  \tlso_udp_tun_v4v6 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v4v6 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V6]);
-	nix_dump("  \tlso_udp_tun_v6v4 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v6v4 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V4]);
-	nix_dump("  \tlso_udp_tun_v6v6 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v6v6 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V6]);
-	nix_dump("  \tlso_tun_v4v4 = %d",
+	nix_dump(file, "  \tlso_tun_v4v4 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V4]);
-	nix_dump("  \tlso_tun_v4v6 = %d",
+	nix_dump(file, "  \tlso_tun_v4v6 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V6]);
-	nix_dump("  \tlso_tun_v6v4 = %d",
+	nix_dump(file, "  \tlso_tun_v6v4 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V4]);
-	nix_dump("  \tlso_tun_v6v6 = %d",
+	nix_dump(file, "  \tlso_tun_v6v6 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V6]);
-	nix_dump("  \tlf_rx_stats = %d", nix->lf_rx_stats);
-	nix_dump("  \tlf_tx_stats = %d", nix->lf_tx_stats);
-	nix_dump("  \trx_chan_cnt = %d", nix->rx_chan_cnt);
-	nix_dump("  \ttx_chan_cnt = %d", nix->tx_chan_cnt);
-	nix_dump("  \tcgx_links = %d", nix->cgx_links);
-	nix_dump("  \tlbk_links = %d", nix->lbk_links);
-	nix_dump("  \tsdp_links = %d", nix->sdp_links);
-	nix_dump("  \ttx_link = %d", nix->tx_link);
-	nix_dump("  \tsqb_size = %d", nix->sqb_size);
-	nix_dump("  \tmsixoff = %d", nix->msixoff);
+	nix_dump(file, "  \tlf_rx_stats = %d", nix->lf_rx_stats);
+	nix_dump(file, "  \tlf_tx_stats = %d", nix->lf_tx_stats);
+	nix_dump(file, "  \trx_chan_cnt = %d", nix->rx_chan_cnt);
+	nix_dump(file, "  \ttx_chan_cnt = %d", nix->tx_chan_cnt);
+	nix_dump(file, "  \tcgx_links = %d", nix->cgx_links);
+	nix_dump(file, "  \tlbk_links = %d", nix->lbk_links);
+	nix_dump(file, "  \tsdp_links = %d", nix->sdp_links);
+	nix_dump(file, "  \ttx_link = %d", nix->tx_link);
+	nix_dump(file, "  \tsqb_size = %d", nix->sqb_size);
+	nix_dump(file, "  \tmsixoff = %d", nix->msixoff);
 	for (i = 0; i < nix->nb_cpt_lf; i++)
-		nix_dump("  \tcpt_msixoff[%d] = %d", i, nix->cpt_msixoff[i]);
-	nix_dump("  \tcints = %d", nix->cints);
-	nix_dump("  \tqints = %d", nix->qints);
-	nix_dump("  \tsdp_link = %d", nix->sdp_link);
-	nix_dump("  \tptp_en = %d", nix->ptp_en);
-	nix_dump("  \trss_alg_idx = %d", nix->rss_alg_idx);
-	nix_dump("  \ttx_pause = %d", nix->tx_pause);
-	nix_dump("  \tinl_inb_ena = %d", nix->inl_inb_ena);
-	nix_dump("  \tinl_outb_ena = %d", nix->inl_outb_ena);
-	nix_dump("  \tinb_sa_base = 0x%p", nix->inb_sa_base);
-	nix_dump("  \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz);
-	nix_dump("  \toutb_sa_base = 0x%p", nix->outb_sa_base);
-	nix_dump("  \toutb_sa_sz = %" PRIu64, nix->outb_sa_sz);
-	nix_dump("  \toutb_err_sso_pffunc = 0x%x", nix->outb_err_sso_pffunc);
-	nix_dump("  \tcpt_lf_base = 0x%p", nix->cpt_lf_base);
-	nix_dump("  \tnb_cpt_lf = %d", nix->nb_cpt_lf);
-	nix_dump("  \tinb_inl_dev = %d", nix->inb_inl_dev);
+		nix_dump(file, "  \tcpt_msixoff[%d] = %d", i, nix->cpt_msixoff[i]);
+	nix_dump(file, "  \tcints = %d", nix->cints);
+	nix_dump(file, "  \tqints = %d", nix->qints);
+	nix_dump(file, "  \tsdp_link = %d", nix->sdp_link);
+	nix_dump(file, "  \tptp_en = %d", nix->ptp_en);
+	nix_dump(file, "  \trss_alg_idx = %d", nix->rss_alg_idx);
+	nix_dump(file, "  \ttx_pause = %d", nix->tx_pause);
+	nix_dump(file, "  \tinl_inb_ena = %d", nix->inl_inb_ena);
+	nix_dump(file, "  \tinl_outb_ena = %d", nix->inl_outb_ena);
+	nix_dump(file, "  \tinb_sa_base = 0x%p", nix->inb_sa_base);
+	nix_dump(file, "  \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz);
+	nix_dump(file, "  \toutb_sa_base = 0x%p", nix->outb_sa_base);
+	nix_dump(file, "  \toutb_sa_sz = %" PRIu64, nix->outb_sa_sz);
+	nix_dump(file, "  \toutb_err_sso_pffunc = 0x%x", nix->outb_err_sso_pffunc);
+	nix_dump(file, "  \tcpt_lf_base = 0x%p", nix->cpt_lf_base);
+	nix_dump(file, "  \tnb_cpt_lf = %d", nix->nb_cpt_lf);
+	nix_dump(file, "  \tinb_inl_dev = %d", nix->inb_inl_dev);
+
 }
 
 void
-roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
+roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev, FILE *file)
 {
-	struct nix_inl_dev *inl_dev =
-		(struct nix_inl_dev *)&roc_inl_dev->reserved;
-	struct dev *dev = &inl_dev->dev;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+	struct dev *dev = NULL;
 	int i;
 
-	nix_dump("nix_inl_dev@%p", inl_dev);
-	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
-	nix_dump("  vf = %d", dev_get_vf(dev->pf_func));
-	nix_dump("  bar2 = 0x%" PRIx64, dev->bar2);
-	nix_dump("  bar4 = 0x%" PRIx64, dev->bar4);
+	if (roc_inl_dev) {
+		inl_dev = (struct nix_inl_dev *)&roc_inl_dev->reserved;
+	} else {
+		if (idev && idev->nix_inl_dev)
+			inl_dev = idev->nix_inl_dev;
+		else
+			return;
+	}
 
-	nix_dump("  \tpci_dev = %p", inl_dev->pci_dev);
-	nix_dump("  \tnix_base = 0x%" PRIxPTR "", inl_dev->nix_base);
-	nix_dump("  \tsso_base = 0x%" PRIxPTR "", inl_dev->sso_base);
-	nix_dump("  \tssow_base = 0x%" PRIxPTR "", inl_dev->ssow_base);
-	nix_dump("  \tnix_msixoff = %d", inl_dev->nix_msixoff);
-	nix_dump("  \tsso_msixoff = %d", inl_dev->sso_msixoff);
-	nix_dump("  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
-	nix_dump("  \tnix_cints = %d", inl_dev->cints);
-	nix_dump("  \tnix_qints = %d", inl_dev->qints);
-	nix_dump("  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
-	nix_dump("  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
-	nix_dump("  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
-	nix_dump("  \txae_waes = %u", inl_dev->xae_waes);
-	nix_dump("  \tiue = %u", inl_dev->iue);
-	nix_dump("  \txaq_aura = 0x%" PRIx64, inl_dev->xaq.aura_handle);
-	nix_dump("  \txaq_mem = 0x%p", inl_dev->xaq.mem);
+	dev = &inl_dev->dev;
+	nix_dump(file, "nix_inl_dev@%p", inl_dev);
+	nix_dump(file, "  pf = %d", dev_get_pf(dev->pf_func));
+	nix_dump(file, "  vf = %d", dev_get_vf(dev->pf_func));
+	nix_dump(file, "  bar2 = 0x%" PRIx64, dev->bar2);
+	nix_dump(file, "  bar4 = 0x%" PRIx64, dev->bar4);
 
-	nix_dump("  \tinl_dev_rq:");
+	nix_dump(file, "  \tpci_dev = %p", inl_dev->pci_dev);
+	nix_dump(file, "  \tnix_base = 0x%" PRIxPTR "", inl_dev->nix_base);
+	nix_dump(file, "  \tsso_base = 0x%" PRIxPTR "", inl_dev->sso_base);
+	nix_dump(file, "  \tssow_base = 0x%" PRIxPTR "", inl_dev->ssow_base);
+	nix_dump(file, "  \tnix_msixoff = %d", inl_dev->nix_msixoff);
+	nix_dump(file, "  \tsso_msixoff = %d", inl_dev->sso_msixoff);
+	nix_dump(file, "  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
+	nix_dump(file, "  \tnix_cints = %d", inl_dev->cints);
+	nix_dump(file, "  \tnix_qints = %d", inl_dev->qints);
+	nix_dump(file, "  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
+	nix_dump(file, "  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
+	nix_dump(file, "  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
+	nix_dump(file, "  \txae_waes = %u", inl_dev->xae_waes);
+	nix_dump(file, "  \tiue = %u", inl_dev->iue);
+	nix_dump(file, "  \txaq_aura = 0x%" PRIx64, inl_dev->xaq.aura_handle);
+	nix_dump(file, "  \txaq_mem = 0x%p", inl_dev->xaq.mem);
+
+	nix_dump(file, "  \tinl_dev_rq:");
 	for (i = 0; i < inl_dev->nb_rqs; i++)
-		roc_nix_rq_dump(&inl_dev->rqs[i]);
+		roc_nix_rq_dump(&inl_dev->rqs[i], file);
 }
 
 void
-roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix)
+roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
 	int i;
 
-	nix_dump("nix@%p", nix);
+	nix_dump(file, "nix@%p", nix);
 	for (i = 0; i < nix->nb_cpt_lf; i++) {
-		nix_dump("NIX inline dev outbound CPT LFs:");
+		nix_dump(file, "NIX inline dev outbound CPT LFs:");
 		cpt_lf_print(&lf_base[i]);
 	}
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 555cb28c1a..019cf6d28b 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -195,7 +195,7 @@ struct roc_nix_inl_dev {
 /* NIX Inline Device API */
 int __roc_api roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev);
 int __roc_api roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev);
-void __roc_api roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev);
+void __roc_api roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev, FILE *file);
 bool __roc_api roc_nix_inl_dev_is_probed(void);
 void __roc_api roc_nix_inl_dev_lock(void);
 void __roc_api roc_nix_inl_dev_unlock(void);
@@ -257,6 +257,6 @@ int __roc_api roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 				  enum roc_nix_inl_sa_sync_op op);
 int __roc_api roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr,
 				    void *sa_cptr, bool inb, uint16_t sa_len);
-void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file);
 
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 5c19bc33fc..445b440447 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -230,7 +230,7 @@ nix_inl_nix_q_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
@@ -262,7 +262,7 @@ nix_inl_nix_ras_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
@@ -295,7 +295,7 @@ nix_inl_nix_err_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
diff --git a/drivers/common/cnxk/roc_nix_irq.c b/drivers/common/cnxk/roc_nix_irq.c
index 71971ef261..d72980fb18 100644
--- a/drivers/common/cnxk/roc_nix_irq.c
+++ b/drivers/common/cnxk/roc_nix_irq.c
@@ -76,7 +76,7 @@ nix_lf_err_irq(void *param)
 	plt_write64(intr, nix->base + NIX_LF_ERR_INT);
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 static int
@@ -125,7 +125,7 @@ nix_lf_ras_irq(void *param)
 
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 static int
@@ -320,7 +320,7 @@ nix_lf_q_irq(void *param)
 
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 int
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a253f412de..2eba44c248 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -455,7 +455,7 @@ struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
 void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);
 
 uint64_t nix_get_blkaddr(struct dev *dev);
-void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx);
+void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file);
 int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
 int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
 			 uint8_t lf_tx_stats, uint8_t lf_rx_stats);
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 81d491a3fd..81fa6b1d93 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -606,8 +606,8 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
 
 	return 0;
 exit:
-	roc_nix_tm_dump(sq->roc_nix);
-	roc_nix_queues_ctx_dump(sq->roc_nix);
+	roc_nix_tm_dump(sq->roc_nix, NULL);
+	roc_nix_queues_ctx_dump(sq->roc_nix, NULL);
 	return -EFAULT;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v2 31/31] net/cnxk: dumps device private information
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (28 preceding siblings ...)
  2022-09-05 13:32   ` [PATCH v2 30/31] common/cnxk: dump device basic info to file Nithin Dabilpuram
@ 2022-09-05 13:32   ` Nithin Dabilpuram
  29 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-05 13:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support for ethdev private data dump callback for
debugging purposes.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  1 +
 drivers/net/cnxk/cnxk_ethdev.h     |  1 +
 drivers/net/cnxk/cnxk_ethdev_ops.c | 29 +++++++++++++++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 89f8cc107d..48d6bedb89 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1682,6 +1682,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
 	.tm_ops_get = cnxk_nix_tm_ops_get,
 	.mtr_ops_get = cnxk_nix_mtr_ops_get,
+	.eth_dev_priv_dump  = cnxk_nix_eth_dev_priv_dump,
 };
 
 static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index bed0e0eada..c09e9bff8e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -585,6 +585,7 @@ int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 			     struct rte_eth_rss_conf *rss_conf);
 int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			       struct rte_eth_rss_conf *rss_conf);
+int cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file);
 
 /* Link */
 void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 64beabdd12..0a8b36342a 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -931,6 +931,35 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int i;
+
+	roc_nix_dump(roc_nix, file);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		roc_nix_rq_dump(&dev->rqs[i], file);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		roc_nix_cq_dump(&dev->cqs[i], file);
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		roc_nix_sq_dump(&dev->sqs[i], file);
+
+	roc_nix_queues_ctx_dump(roc_nix, file);
+
+	roc_nix_tm_dump(roc_nix, file);
+
+	roc_nix_inl_dev_dump(NULL, file);
+
+	roc_nix_inl_outb_cpt_lfs_dump(roc_nix, file);
+
+	return 0;
+}
+
 int
 cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 			 struct rte_eth_rss_conf *rss_conf)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable
  2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                   ` (23 preceding siblings ...)
  2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
@ 2022-09-12 13:13 ` Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
                     ` (30 more replies)
  24 siblings, 31 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add support to enable PTP per ethernet device when that
specific ethernet device is connected to event device via
Rx adapter.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
v3:
- Included this patch from series 23650 to resolve compilation
  dependency.
- Fixed commit header for patch 29/32
- Fixed commit message for patch 15/32

v2: 
- Included this patch from series 24029 as suggested by Jerin to resolve
  compilation dependency with event dev.
- Fixed one-liner commit messages
- Added few more patches for upstream.

 drivers/common/cnxk/roc_io.h             |  5 ++-
 drivers/event/cnxk/cn10k_eventdev.c      |  9 ++---
 drivers/event/cnxk/cn10k_worker.h        | 48 +++++++++++++++---------
 drivers/event/cnxk/cn9k_eventdev.c       | 13 +++----
 drivers/event/cnxk/cn9k_worker.h         | 32 +++++++++++-----
 drivers/event/cnxk/cnxk_eventdev.h       | 14 ++++---
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  9 +++++
 drivers/net/cnxk/cn10k_rx.h              |  3 +-
 8 files changed, 82 insertions(+), 51 deletions(-)

diff --git a/drivers/common/cnxk/roc_io.h b/drivers/common/cnxk/roc_io.h
index 9d73e263f7..13f98ed549 100644
--- a/drivers/common/cnxk/roc_io.h
+++ b/drivers/common/cnxk/roc_io.h
@@ -161,14 +161,15 @@ roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
 {
 	volatile const __uint128_t *src128 = (const __uint128_t *)in;
 	volatile __uint128_t *dst128 = (__uint128_t *)out;
+	uint32_t i;
 
 	dst128[0] = src128[0];
 	dst128[1] = src128[1];
 	/* lmtext receives following value:
 	 * 1: NIX_SUBDC_EXT needed i.e. tx vlan case
 	 */
-	if (lmtext)
-		dst128[2] = src128[2];
+	for (i = 0; i < lmtext; i++)
+		dst128[2 + i] = src128[2 + i];
 }
 
 static __plt_always_inline void
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 5a0cab40a9..0be7ebfe29 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,8 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
-		       void *tstmp_info)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,7 +702,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
 	for (i = 0; i < dev->nb_event_ports; i++) {
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
-		ws->tstamp = tstmp_info;
+		ws->tstamp = dev->tstamp;
 	}
 }
 
@@ -715,7 +714,6 @@ cn10k_sso_rx_adapter_queue_add(
 {
 	struct cn10k_eth_rxq *rxq;
 	void *lookup_mem;
-	void *tstmp_info;
 	int rc;
 
 	rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
@@ -728,8 +726,7 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	tstmp_info = rxq->tstamp;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 0915f404e0..db56d96404 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -108,12 +108,29 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
 			      mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static void
+cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+			 struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+	uint8_t laptr;
+
+	laptr = (uint8_t) *
+		(uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+	if (laptr == sizeof(uint64_t)) {
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+					 (uint64_t *)tstamp_ptr);
+	}
+}
+
 static __rte_always_inline void
 cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 		   void *lookup_mem, void *tstamp, uintptr_t lbase)
 {
-	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
-			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
 	struct rte_event_vector *vec;
 	uint64_t aura_handle, laddr;
 	uint16_t nb_mbufs, non_vec;
@@ -133,6 +150,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
 		rte_prefetch0(&vec->ptrs[i]);
 
+	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+		mbuf_init |= 8;
+
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
 					      flags | NIX_RX_VWQE_F, lookup_mem,
@@ -158,7 +178,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	while (non_vec) {
 		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
-		uint64_t tstamp_ptr;
 
 		mbuf = (struct rte_mbuf *)((char *)cqe -
 					   sizeof(struct rte_mbuf));
@@ -178,12 +197,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
 				      mbuf_init, flags);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
-					flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					(uint64_t *)tstamp_ptr);
+
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn10k_sso_process_tstamp((uint64_t)wqe[0],
+						 (uint64_t)mbuf, tstamp);
 		wqe[0] = (struct rte_mbuf *)mbuf;
 		non_vec--;
 		wqe++;
@@ -200,8 +217,6 @@ static __rte_always_inline void
 cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			   const uint32_t flags)
 {
-	uint64_t tstamp_ptr;
-
 	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
 		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
 	if ((flags & CPT_RX_WQE_F) &&
@@ -246,12 +261,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
 		cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
 				  ws->lookup_mem);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
-					 flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					 (uint64_t *)tstamp_ptr);
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn10k_sso_process_tstamp(u64[1], mbuf,
+						 ws->tstamp[port]);
 		u64[1] = mbuf;
 	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
 		   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
@@ -262,7 +274,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
 		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp, ws->lmt_base);
+				   ws->tstamp[port], ws->lmt_base);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2e27030049..8ade30f84b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -123,7 +123,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
 	uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
-	struct cnxk_timesync_info *tstamp;
+	struct cnxk_timesync_info **tstamp;
 	struct cn9k_sso_hws_dual *dws;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
@@ -942,8 +942,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
-		      void *tstmp_info)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -953,11 +952,11 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
 			struct cn9k_sso_hws_dual *dws =
 				event_dev->data->ports[i];
 			dws->lookup_mem = lookup_mem;
-			dws->tstamp = tstmp_info;
+			dws->tstamp = dev->tstamp;
 		} else {
 			struct cn9k_sso_hws *ws = event_dev->data->ports[i];
 			ws->lookup_mem = lookup_mem;
-			ws->tstamp = tstmp_info;
+			ws->tstamp = dev->tstamp;
 		}
 	}
 }
@@ -970,7 +969,6 @@ cn9k_sso_rx_adapter_queue_add(
 {
 	struct cn9k_eth_rxq *rxq;
 	void *lookup_mem;
-	void *tstmp_info;
 	int rc;
 
 	rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
@@ -984,8 +982,7 @@ cn9k_sso_rx_adapter_queue_add(
 
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	tstmp_info = rxq->tstamp;
-	cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+	cn9k_sso_set_priv_mem(event_dev, lookup_mem);
 	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 64e97e321a..54b3545022 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -169,13 +169,29 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 			     mbuf_init | ((uint64_t)port_id) << 48, flags);
 }
 
+static void
+cn9k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+			struct cnxk_timesync_info *tstamp)
+{
+	uint64_t tstamp_ptr;
+	uint8_t laptr;
+
+	laptr = (uint8_t) *
+		(uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+	if (laptr == sizeof(uint64_t)) {
+		/* Extracting tstamp, if PTP enabled*/
+		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+					   CNXK_SSO_WQE_SG_PTR);
+		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+					(uint64_t *)tstamp_ptr);
+	}
+}
+
 static __rte_always_inline void
 cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 			  const void *const lookup_mem,
-			  struct cnxk_timesync_info *tstamp)
+			  struct cnxk_timesync_info **tstamp)
 {
-	uint64_t tstamp_ptr;
-
 	u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
 		 (u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
 	if ((flags & CPT_RX_WQE_F) &&
@@ -187,12 +203,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
 		cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
 				 lookup_mem);
-		/* Extracting tstamp, if PTP enabled*/
-		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
-					   CNXK_SSO_WQE_SG_PTR);
-		cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
-					flags & NIX_RX_OFFLOAD_TSTAMP_F,
-					(uint64_t *)tstamp_ptr);
+		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
 	}
 }
@@ -298,7 +310,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 static __rte_always_inline uint16_t
 cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
 			    const uint32_t flags, void *lookup_mem,
-			    struct cnxk_timesync_info *tstamp)
+			    struct cnxk_timesync_info **tstamp)
 {
 	union {
 		__uint128_t get_work;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index bfd0c5627e..fae4484758 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,6 +38,7 @@
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 #define CNXK_SSO_WQE_SG_PTR    (9)
+#define CNXK_SSO_WQE_LAYR_PTR  (5)
 #define CNXK_SSO_PRIORITY_CNT  (0x8)
 #define CNXK_SSO_WEIGHT_MAX    (0x3f)
 #define CNXK_SSO_WEIGHT_MIN    (0x3)
@@ -123,6 +124,7 @@ struct cnxk_sso_evdev {
 	uint64_t *timer_adptr_sz;
 	uint16_t vec_pool_cnt;
 	uint64_t *vec_pools;
+	struct cnxk_timesync_info *tstamp[RTE_MAX_ETHPORTS];
 	struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
 	/* Dev args */
 	uint32_t xae_cnt;
@@ -140,12 +142,12 @@ struct cnxk_sso_evdev {
 struct cn10k_sso_hws {
 	uint64_t base;
 	uint64_t gw_rdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint32_t gw_wdata;
 	uint8_t swtag_req;
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
@@ -160,11 +162,11 @@ struct cn10k_sso_hws {
 struct cn9k_sso_hws {
 	uint64_t base;
 	uint64_t gw_wdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
@@ -177,12 +179,12 @@ struct cn9k_sso_hws {
 struct cn9k_sso_hws_dual {
 	uint64_t base[2]; /* Ping and Pong */
 	uint64_t gw_wdata;
-	/* PTP timestamp */
-	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
 	uint8_t swtag_req;
 	uint8_t vws; /* Ping pong bit */
 	uint8_t hws_id;
+	/* PTP timestamp */
+	struct cnxk_timesync_info **tstamp;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 1f2e1b4b5d..b4fd821912 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -207,6 +207,14 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 	return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
 }
 
+static void
+cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev,
+		    struct cnxk_sso_evdev *dev)
+{
+	if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		dev->tstamp[port_id] = &cnxk_eth_dev->tstamp;
+}
+
 int
 cnxk_sso_rx_adapter_queue_add(
 	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -255,6 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
 			roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
 					      rxq_sp->qconf.mp->pool_id, true,
 					      dev->force_ena_bp, rxq_sp->tc);
+		cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
 		cnxk_eth_dev->nb_rxq_sso++;
 	}
 
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 5ecb20f038..0f8790b8c7 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -1567,7 +1567,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				ol_flags3, mbuf3);
 		}
 
-		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+		if ((flags & NIX_RX_OFFLOAD_TSTAMP_F) &&
+		    ((flags & NIX_RX_VWQE_F) && tstamp)) {
 			const uint16x8_t len_off = {
 				0,			     /* ptype   0:15 */
 				0,			     /* ptype  16:32 */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
@ 2022-09-12 13:13   ` Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 03/32] common/cnxk: fix part value for cn10k Nithin Dabilpuram
                     ` (29 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Pavan Nikhilesh, Shijith Thotton
  Cc: jerinj, dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add FC check in the vector event Tx path; the check needs to be
performed after the head wait, right before the LMTST is issued.
Since SQB pool FC updates are delayed w.r.t. the actual utilization
of the pool, add sufficient slack to avoid overflow.

Added a new device argument to override the default SQB slack
configured, can be used as follows:

    -a 0002:02:00.0,sqb_slack=32

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/nics/cnxk.rst                 | 12 +++++++
 drivers/common/cnxk/roc_nix.h            |  7 ++--
 drivers/common/cnxk/roc_nix_priv.h       |  1 -
 drivers/common/cnxk/roc_nix_queue.c      | 21 +++++------
 drivers/common/cnxk/roc_nix_tm.c         |  2 +-
 drivers/common/cnxk/roc_nix_tm_ops.c     |  4 +--
 drivers/event/cnxk/cn10k_eventdev.c      |  3 +-
 drivers/event/cnxk/cn9k_eventdev.c       |  3 +-
 drivers/event/cnxk/cn9k_worker.h         |  4 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  9 ++---
 drivers/net/cnxk/cn10k_tx.h              | 46 ++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev_devargs.c   |  8 ++++-
 12 files changed, 97 insertions(+), 23 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index e24eaa8bc4..eeaa3fa1cc 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -157,6 +157,18 @@ Runtime Config Options
    With the above configuration, each send queue's descriptor buffer count is
    limited to a maximum of 64 buffers.
 
+- ``SQB slack count`` (default ``12``)
+
+   Send queue descriptor slack count added to SQB count when a Tx queue is
+   created, can be set using ``sqb_slack`` ``devargs`` parameter.
+
+   For example::
+
+      -a 0002:02:00.0,sqb_slack=32
+
+   With the above configuration, each send queue's descriptor buffer count will
+   be increased by 32, while keeping the queue limit to default configuration.
+
 - ``Switch header enable`` (default ``none``)
 
    A port can be configured to a specific switch header type by using
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 4671f80e7c..c9aaedc915 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,6 +13,8 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
+#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_SLACK	      12U
 
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
@@ -410,19 +412,20 @@ struct roc_nix {
 	bool enable_loop;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
-	uint32_t outb_nb_desc;
+	uint16_t sqb_slack;
 	uint16_t outb_nb_crypto_qs;
+	uint32_t outb_nb_desc;
 	uint32_t ipsec_in_min_spi;
 	uint32_t ipsec_in_max_spi;
 	uint32_t ipsec_out_max_sa;
 	bool ipsec_out_sso_pffunc;
+	bool custom_sa_action;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
 	bool io_enabled;
 	bool rx_ptp_ena;
 	uint16_t cints;
-	bool custom_sa_action;
 
 #define ROC_NIX_MEM_SZ (6 * 1024)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5b0522c8cb..a3d4ddf5d5 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -13,7 +13,6 @@
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
 #define NIX_SQB_LIST_SPACE   ((uint16_t)2)
-#define NIX_SQB_LOWER_THRESH ((uint16_t)70)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index fa4c954631..692b13415a 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -682,12 +682,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
-	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
+	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
 	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
 	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
 	/* Clamp up the SQB count */
 	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+			      PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
@@ -695,8 +695,9 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		nb_sqb_bufs -
 		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
 	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
+		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
 
+	nb_sqb_bufs += roc_nix->sqb_slack;
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
@@ -711,12 +712,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		aura.fc_stype = 0x3; /* STSTP */
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
-	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
+	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
 				 &pool);
 	if (rc)
 		goto fail;
 
-	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
+	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
 		goto nomem;
@@ -724,21 +725,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 
 	/* Fill the initial buffers */
 	iova = (uint64_t)sq->sqe_mem;
-	for (count = 0; count < NIX_MAX_SQB; count++) {
+	for (count = 0; count < nb_sqb_bufs; count++) {
 		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
 		iova += blk_sz;
 	}
 
-	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
-	    NIX_MAX_SQB) {
+	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
+	    nb_sqb_bufs) {
 		plt_err("Failed to free all pointers to the pool");
 		rc = NIX_ERR_NO_MEM;
 		goto npa_fail;
 	}
 
 	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
-	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
-	sq->aura_sqb_bufs = NIX_MAX_SQB;
+	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
+	sq->aura_sqb_bufs = nb_sqb_bufs;
 
 	return rc;
 npa_fail:
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a31abded1a..81d491a3fd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -594,7 +594,7 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
 
 		/* SQ reached quiescent state */
 		if (sqb_cnt <= 1 && head_off == tail_off &&
-		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
+		    (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs)) {
 			break;
 		}
 
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 4aa55002fe..7036495ad8 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -67,7 +67,7 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 	if (enable)
 		*(volatile uint64_t *)sq->fc = rsp->aura.count;
 	else
-		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
+		*(volatile uint64_t *)sq->fc = sq->aura_sqb_bufs;
 	/* Sync write barrier */
 	plt_wmb();
 	return 0;
@@ -535,7 +535,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		tail_off = (val >> 28) & 0x3F;
 
 		if (sqb_cnt > 1 || head_off != tail_off ||
-		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
+		    (*(uint64_t *)sq->fc != sq->aura_sqb_bufs))
 			plt_err("Failed to gracefully flush sq %u", sq->qid);
 	}
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 0be7ebfe29..fee01713b4 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -812,7 +812,8 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 8ade30f84b..992a2a555c 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1043,7 +1043,8 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 54b3545022..d86cb94a77 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -761,6 +761,10 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
+	if (((txq->nb_sqb_bufs_adj -
+	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
+	     << txq->sqes_per_sqb_log2) <= 0)
+		return 0;
 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
 	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
 			      txq->mark_fmt);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index b4fd821912..7937cadd25 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -351,14 +351,15 @@ cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
 {
 	int rc;
 
-	if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+	if (sq->aura_sqb_bufs != nb_sqb_bufs) {
 		rc = roc_npa_aura_limit_modify(
 			sq->aura_handle,
 			RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
 		if (rc < 0)
 			return rc;
 
-		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs) -
+				  sq->roc_nix->sqb_slack;
 	}
 	return 0;
 }
@@ -556,7 +557,7 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
 	} else {
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
 		if (ret < 0)
@@ -588,7 +589,7 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 							     i);
 	} else {
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
 		if (ret < 0)
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index ea13866b20..8056510589 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -54,6 +54,31 @@
 
 #define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
 
+static __plt_always_inline void
+cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
+{
+	int64_t cached, refill;
+
+retry:
+	while (__atomic_load_n(&txq->fc_cache_pkts, __ATOMIC_RELAXED) < 0)
+		;
+	cached = __atomic_sub_fetch(&txq->fc_cache_pkts, req, __ATOMIC_ACQUIRE);
+	/* Check if there is enough space, else update and retry. */
+	if (cached < 0) {
+		/* Check if we have space else retry. */
+		do {
+			refill =
+				(txq->nb_sqb_bufs_adj -
+				 __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+				<< txq->sqes_per_sqb_log2;
+		} while (refill <= 0);
+		__atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
+					  0, __ATOMIC_RELEASE,
+					  __ATOMIC_RELAXED);
+		goto retry;
+	}
+}
+
 /* Function to determine no of tx subdesc required in case ext
  * sub desc is enabled.
  */
@@ -1039,6 +1064,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= (15ULL << 12);
 		data |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 
@@ -1048,6 +1075,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 17)) << 12;
 		data |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data, pa);
 	} else if (burst) {
@@ -1057,6 +1086,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 1)) << 12;
 		data |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 	}
@@ -1188,6 +1219,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= (15ULL << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 
@@ -1197,6 +1230,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data1 |= ((uint64_t)(burst - 17)) << 12;
 		data1 |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data1, pa1);
 	} else if (burst) {
@@ -1207,6 +1242,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= ((burst - 1) << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 	}
@@ -2735,6 +2772,9 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= (15ULL << 12);
 		wd.data[0] |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				cn10k_nix_pkts_per_vec_brst(flags) >> 1);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 
@@ -2750,6 +2790,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
 		wd.data[1] |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				burst - (cn10k_nix_pkts_per_vec_brst(flags) >>
+					 1));
 		/* STEOR1 */
 		roc_lmt_submit_steorl(wd.data[1], pa);
 	} else if (lnum) {
@@ -2765,6 +2809,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
 		wd.data[0] |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 	}
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 248582e1f6..4ded850622 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -246,6 +246,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)
 #define CNXK_SDP_CHANNEL_MASK	"sdp_channel_mask"
 #define CNXK_FLOW_PRE_L2_INFO	"flow_pre_l2_info"
 #define CNXK_CUSTOM_SA_ACT	"custom_sa_act"
+#define CNXK_SQB_SLACK		"sqb_slack"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -254,6 +255,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB;
 	struct flow_pre_l2_size_info pre_l2_info;
 	uint32_t ipsec_in_max_spi = BIT(8) - 1;
+	uint16_t sqb_slack = ROC_NIX_SQB_SLACK;
 	uint32_t ipsec_out_max_sa = BIT(12);
 	uint16_t flow_prealloc_size = 1;
 	uint16_t switch_header_type = 0;
@@ -311,6 +313,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 			   &parse_pre_l2_hdr_info, &pre_l2_info);
 	rte_kvargs_process(kvlist, CNXK_CUSTOM_SA_ACT, &parse_flag,
 			   &custom_sa_act);
+	rte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count,
+			   &sqb_slack);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -328,6 +332,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	dev->nix.reta_sz = reta_sz;
 	dev->nix.lock_rx_ctx = lock_rx_ctx;
 	dev->nix.custom_sa_action = custom_sa_act;
+	dev->nix.sqb_slack = sqb_slack;
 	dev->npc.flow_prealloc_size = flow_prealloc_size;
 	dev->npc.flow_max_priority = flow_max_priority;
 	dev->npc.switch_header_type = switch_header_type;
@@ -356,4 +361,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
 			      CNXK_NO_INL_DEV "=0"
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
-			      CNXK_CUSTOM_SA_ACT "=1");
+			      CNXK_CUSTOM_SA_ACT "=1"
+			      CNXK_SQB_SLACK "=<12-512>");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 03/32] common/cnxk: fix part value for cn10k
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
@ 2022-09-12 13:13   ` Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
                     ` (28 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Update the logic for getting the part and pass values for the cn10k family,
as the device-tree compatible logic does not work in VMs.
Scan all PCI devices and detect the first RVU device; its subsystem
device file gives the part number and its revision file provides the pass information.

Fixes: 014a9e222bac ("common/cnxk: add model init and IO handling API")

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c    | 152 +++++++++++++++++++++--------
 drivers/common/cnxk/roc_platform.h |   3 +
 2 files changed, 113 insertions(+), 42 deletions(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index c934a10509..626de60fb0 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -2,6 +2,7 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include <dirent.h>
 #include <fcntl.h>
 #include <unistd.h>
 
@@ -40,6 +41,16 @@ struct roc_model *roc_model;
 #define MODEL_MINOR_SHIFT 0
 #define MODEL_MINOR_MASK  ((1 << MODEL_MINOR_BITS) - 1)
 
+#define MODEL_CN10K_PART_SHIFT	8
+#define MODEL_CN10K_PASS_BITS	4
+#define MODEL_CN10K_PASS_MASK	((1 << MODEL_CN10K_PASS_BITS) - 1)
+#define MODEL_CN10K_MAJOR_BITS	2
+#define MODEL_CN10K_MAJOR_SHIFT 2
+#define MODEL_CN10K_MAJOR_MASK	((1 << MODEL_CN10K_MAJOR_BITS) - 1)
+#define MODEL_CN10K_MINOR_BITS	2
+#define MODEL_CN10K_MINOR_SHIFT 0
+#define MODEL_CN10K_MINOR_MASK	((1 << MODEL_CN10K_MINOR_BITS) - 1)
+
 static const struct model_db {
 	uint32_t impl;
 	uint32_t part;
@@ -66,55 +77,101 @@ static const struct model_db {
 	{VENDOR_CAVIUM, PART_95xxMM, 0, 0, ROC_MODEL_CNF95xxMM_A0,
 	 "cnf95xxmm_a0"}};
 
-static uint32_t
-cn10k_part_get(void)
+/* Detect if RVU device */
+static bool
+is_rvu_device(unsigned long val)
 {
-	uint32_t soc = 0x0;
-	char buf[BUFSIZ];
-	char *ptr;
-	FILE *fd;
-
-	/* Read the CPU compatible variant */
-	fd = fopen("/proc/device-tree/compatible", "r");
-	if (!fd) {
-		plt_err("Failed to open /proc/device-tree/compatible");
-		goto err;
-	}
+	return (val == PCI_DEVID_CNXK_RVU_PF || val == PCI_DEVID_CNXK_RVU_VF ||
+		val == PCI_DEVID_CNXK_RVU_AF ||
+		val == PCI_DEVID_CNXK_RVU_AF_VF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_PF ||
+		val == PCI_DEVID_CNXK_RVU_NPA_VF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_PF ||
+		val == PCI_DEVID_CNXK_RVU_SSO_TIM_VF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_PF ||
+		val == PCI_DEVID_CN10K_RVU_CPT_VF);
+}
 
-	if (fgets(buf, sizeof(buf), fd) == NULL) {
-		plt_err("Failed to read from /proc/device-tree/compatible");
-		goto fclose;
-	}
-	ptr = strchr(buf, ',');
-	if (!ptr) {
-		plt_err("Malformed 'CPU compatible': <%s>", buf);
-		goto fclose;
-	}
-	ptr++;
-	if (strcmp("cn10ka", ptr) == 0) {
-		soc = PART_106xx;
-	} else if (strcmp("cnf10ka", ptr) == 0) {
-		soc = PART_105xx;
-	} else if (strcmp("cnf10kb", ptr) == 0) {
-		soc = PART_105xxN;
-	} else if (strcmp("cn10kb", ptr) == 0) {
-		soc = PART_103xx;
-	} else {
-		plt_err("Unidentified 'CPU compatible': <%s>", ptr);
-		goto fclose;
+static int
+rvu_device_lookup(const char *dirname, uint32_t *part, uint32_t *pass)
+{
+	char filename[PATH_MAX];
+	unsigned long val;
+
+	/* Check if vendor id is cavium */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	if (val != PCI_VENDOR_ID_CAVIUM)
+		goto error;
+
+	/* Get device id  */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	/* Check if device ID belongs to any RVU device */
+	if (!is_rvu_device(val))
+		goto error;
+
+	/* Get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*part = val >> MODEL_CN10K_PART_SHIFT;
+
+	/* Get revision for pass value*/
+	snprintf(filename, sizeof(filename), "%s/revision", dirname);
+	if (plt_sysfs_value_parse(filename, &val) < 0)
+		goto error;
+
+	*pass = val & MODEL_CN10K_PASS_MASK;
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
+/* Scans through all PCI devices, detects RVU device and returns
+ * subsystem_device
+ */
+static int
+cn10k_part_pass_get(uint32_t *part, uint32_t *pass)
+{
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+	char dirname[PATH_MAX];
+	struct dirent *e;
+	DIR *dir;
+
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL) {
+		plt_err("%s(): opendir failed: %s\n", __func__,
+			strerror(errno));
+		return -errno;
 	}
 
-fclose:
-	fclose(fd);
+	while ((e = readdir(dir)) != NULL) {
+		if (e->d_name[0] == '.')
+			continue;
+
+		snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
+			 e->d_name);
+
+		/* Lookup for rvu device and get part pass information */
+		if (!rvu_device_lookup(dirname, part, pass))
+			break;
+	}
 
-err:
-	return soc;
+	closedir(dir);
+	return 0;
 }
 
 static bool
 populate_model(struct roc_model *model, uint32_t midr)
 {
-	uint32_t impl, major, part, minor;
+	uint32_t impl, major, part, minor, pass;
 	bool found = false;
 	size_t i;
 
@@ -124,8 +181,19 @@ populate_model(struct roc_model *model, uint32_t midr)
 	minor = (midr >> MODEL_MINOR_SHIFT) & MODEL_MINOR_MASK;
 
 	/* Update part number for cn10k from device-tree */
-	if (part == SOC_PART_CN10K)
-		part = cn10k_part_get();
+	if (part == SOC_PART_CN10K) {
+		if (cn10k_part_pass_get(&part, &pass))
+			goto not_found;
+		/*
+		 * Pass value format:
+		 * Bits 0..1: minor pass
+		 * Bits 3..2: major pass
+		 */
+		minor = (pass >> MODEL_CN10K_MINOR_SHIFT) &
+			MODEL_CN10K_MINOR_MASK;
+		major = (pass >> MODEL_CN10K_MAJOR_SHIFT) &
+			MODEL_CN10K_MAJOR_MASK;
+	}
 
 	for (i = 0; i < PLT_DIM(model_db); i++)
 		if (model_db[i].impl == impl && model_db[i].part == part &&
@@ -136,7 +204,7 @@ populate_model(struct roc_model *model, uint32_t midr)
 			found = true;
 			break;
 		}
-
+not_found:
 	if (!found) {
 		model->flag = 0;
 		strncpy(model->name, "unknown", ROC_MODEL_STR_LEN_MAX - 1);
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 502f243a81..3e7adfc5b8 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -24,6 +24,8 @@
 #include <rte_tailq.h>
 #include <rte_telemetry.h>
 
+#include "eal_filesystem.h"
+
 #include "roc_bits.h"
 
 #if defined(__ARM_FEATURE_SVE)
@@ -94,6 +96,7 @@
 #define plt_pci_device		    rte_pci_device
 #define plt_pci_read_config	    rte_pci_read_config
 #define plt_pci_find_ext_capability rte_pci_find_ext_capability
+#define plt_sysfs_value_parse	    eal_parse_sysfs_value
 
 #define plt_log2_u32	 rte_log2_u32
 #define plt_cpu_to_be_16 rte_cpu_to_be_16
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 03/32] common/cnxk: fix part value for cn10k Nithin Dabilpuram
@ 2022-09-12 13:13   ` Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
                     ` (27 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Add support for the cn10ka A1 pass. It is the next
minor pass after A0.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c | 1 +
 drivers/common/cnxk/roc_model.h | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index 626de60fb0..bdbd9a96b2 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -60,6 +60,7 @@ static const struct model_db {
 	char name[ROC_MODEL_STR_LEN_MAX];
 } model_db[] = {
 	{VENDOR_ARM, PART_106xx, 0, 0, ROC_MODEL_CN106xx_A0, "cn10ka_a0"},
+	{VENDOR_ARM, PART_106xx, 0, 1, ROC_MODEL_CN106xx_A1, "cn10ka_a1"},
 	{VENDOR_ARM, PART_105xx, 0, 0, ROC_MODEL_CNF105xx_A0, "cnf10ka_a0"},
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index 37c8a4744d..d231d44b60 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -25,6 +25,7 @@ struct roc_model {
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
 #define ROC_MODEL_CN103xx_A0   BIT_ULL(23)
+#define ROC_MODEL_CN106xx_A1   BIT_ULL(24)
 /* Following flags describe platform code is running on */
 #define ROC_ENV_HW   BIT_ULL(61)
 #define ROC_ENV_EMUL BIT_ULL(62)
@@ -48,7 +49,7 @@ struct roc_model {
 	 ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CNF95xxN_A1 |                       \
 	 ROC_MODEL_CNF95xxN_B0)
 
-#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0)
+#define ROC_MODEL_CN106xx   (ROC_MODEL_CN106xx_A0 | ROC_MODEL_CN106xx_A1)
 #define ROC_MODEL_CNF105xx  (ROC_MODEL_CNF105xx_A0)
 #define ROC_MODEL_CNF105xxN (ROC_MODEL_CNF105xxN_A0)
 #define ROC_MODEL_CN103xx   (ROC_MODEL_CN103xx_A0)
@@ -191,6 +192,12 @@ roc_model_is_cn10ka_a0(void)
 	return roc_model->flag & ROC_MODEL_CN106xx_A0;
 }
 
+static inline uint64_t
+roc_model_is_cn10ka_a1(void)
+{
+	return roc_model->flag & ROC_MODEL_CN106xx_A1;
+}
+
 static inline uint64_t
 roc_model_is_cnf10ka_a0(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2022-09-12 13:13   ` [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
@ 2022-09-12 13:13   ` Nithin Dabilpuram
  2022-09-12 13:13   ` [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
                     ` (26 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev, Srujana Challa

From: Srujana Challa <schalla@marvell.com>

Update the CPT inbound inline IPsec configuration mailbox
to provide the opcode and CPT credit from the VF.
This patch also adds a mailbox for reading the inbound IPsec
configuration.

Signed-off-by: Srujana Challa <schalla@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c   | 15 +++++++++++++++
 drivers/common/cnxk/roc_cpt.h   |  2 ++
 drivers/common/cnxk/roc_mbox.h  | 12 +++++++++---
 drivers/common/cnxk/version.map |  1 +
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index f1be6a3401..d607bde3c4 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -260,6 +260,21 @@ roc_cpt_inline_ipsec_cfg(struct dev *cpt_dev, uint8_t lf_id,
 	return cpt_lf_outb_cfg(cpt_dev, sso_pf_func, nix_pf_func, lf_id, ena);
 }
 
+int
+roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
+				  struct nix_inline_ipsec_cfg *inb_cfg)
+{
+	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
+	struct dev *dev = &cpt->dev;
+	struct msg_req *req;
+
+	req = mbox_alloc_msg_nix_read_inline_ipsec_cfg(dev->mbox);
+	if (req == NULL)
+		return -EIO;
+
+	return mbox_process_msg(dev->mbox, (void *)&inb_cfg);
+}
+
 int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 			     uint16_t param2)
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index a3a65f1e94..4e3a078a90 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -158,6 +158,8 @@ int __roc_api roc_cpt_lf_ctx_flush(struct roc_cpt_lf *lf, void *cptr,
 int __roc_api roc_cpt_lf_ctx_reload(struct roc_cpt_lf *lf, void *cptr);
 int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 				       struct roc_nix *nix);
+int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
+	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
 					   uint16_t param1, uint16_t param2);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 965c704322..912de1121b 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -263,7 +263,9 @@ struct mbox_msghdr {
 	  nix_bp_cfg_rsp)                                                      \
 	M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req,      \
 	  msg_rsp)                                                             \
-	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)
+	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
+	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
+	  msg_req, nix_inline_ipsec_cfg)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1161,7 +1163,9 @@ struct nix_inline_ipsec_cfg {
 	uint32_t __io cpt_credit;
 	struct {
 		uint8_t __io egrp;
-		uint8_t __io opcode;
+		uint16_t __io opcode;
+		uint16_t __io param1;
+		uint16_t __io param2;
 	} gen_cfg;
 	struct {
 		uint16_t __io cpt_pf_func;
@@ -1465,7 +1469,9 @@ struct cpt_rx_inline_lf_cfg_msg {
 	uint16_t __io sso_pf_func;
 	uint16_t __io param1;
 	uint16_t __io param2;
-	uint16_t __io reserved;
+	uint16_t __io opcode;
+	uint32_t __io credit;
+	uint32_t __io reserved;
 };
 
 enum cpt_eng_type {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 019f53173f..a2d99e1f4a 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -65,6 +65,7 @@ INTERNAL {
 	roc_cpt_dev_init;
 	roc_cpt_eng_grp_add;
 	roc_cpt_inline_ipsec_cfg;
+	roc_cpt_inline_ipsec_inb_cfg_read;
 	roc_cpt_inline_ipsec_inb_cfg;
 	roc_cpt_iq_disable;
 	roc_cpt_iq_enable;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2022-09-12 13:13   ` [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
@ 2022-09-12 13:13   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
                     ` (25 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:13 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Fix missing fc wait for outbound path in vector mode.
Currently only poll mode has it.

Fixes: 358d02d20a2f ("net/cnxk: support flow control for outbound inline")
Cc: ndabilpuram@marvell.com

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 8056510589..07c88a974e 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1049,9 +1049,13 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -1199,9 +1203,13 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
+		uint16_t sec_pkts = ((c_lnum << 1) + c_loff);
+
 		/* Reduce pkts to be sent to CPT */
-		burst -= ((c_lnum << 1) + c_loff);
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		burst -= sec_pkts;
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
@@ -2753,7 +2761,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 
 	/* Submit CPT instructions if any */
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
-		cn10k_nix_sec_fc_wait(txq, (c_lnum << 1) + c_loff);
+		uint16_t sec_pkts = (c_lnum << 1) + c_loff;
+
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, sec_pkts);
+		cn10k_nix_sec_fc_wait(txq, sec_pkts);
 		cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, c_lnum, c_loff,
 				     c_shft);
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2022-09-12 13:13   ` [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
                     ` (24 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Limit the meta aura workaround to CN10K A0.
Also mark other NIX and inline related erratas as applicable
to CN10K A1.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_errata.h  |  7 +++++++
 drivers/common/cnxk/roc_nix_inl.c | 10 ++++++----
 drivers/net/cnxk/cnxk_ethdev.c    |  3 ++-
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index f04829736b..8dc372f956 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -80,6 +80,13 @@ roc_errata_nix_has_perf_issue_on_stats_update(void)
 /* Errata IPBUCPT-38726, IPBUCPT-38727 */
 static inline bool
 roc_errata_cpt_hang_on_x2p_bp(void)
+{
+	return roc_model_is_cn10ka_a0() || roc_model_is_cn10ka_a1();
+}
+
+/* IPBUNIXRX-40400 */
+static inline bool
+roc_errata_nix_no_meta_aura(void)
 {
 	return roc_model_is_cn10ka_a0();
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 7da89382e9..603551bf83 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -627,18 +627,18 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 	inl_rq->first_skip = rq->first_skip;
 	inl_rq->later_skip = rq->later_skip;
 	inl_rq->lpb_size = rq->lpb_size;
-	inl_rq->lpb_drop_ena = true;
 	inl_rq->spb_ena = rq->spb_ena;
 	inl_rq->spb_aura_handle = rq->spb_aura_handle;
 	inl_rq->spb_size = rq->spb_size;
-	inl_rq->spb_drop_ena = !!rq->spb_ena;
 
-	if (!roc_model_is_cn9k()) {
+	if (roc_errata_nix_no_meta_aura()) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->lpb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
@@ -653,12 +653,14 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 		roc_npa_aura_drop_set(inl_rq->aura_handle, aura_drop, true);
 	}
 
-	if (inl_rq->spb_ena) {
+	if (roc_errata_nix_no_meta_aura() && inl_rq->spb_ena) {
 		uint64_t aura_limit =
 			roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
 		uint64_t aura_shift = plt_log2_u32(aura_limit);
 		uint64_t aura_drop, drop_pc;
 
+		inl_rq->spb_drop_ena = true;
+
 		if (aura_shift < 8)
 			aura_shift = 0;
 		else
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index cfcc4df916..d90baabc4d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -617,7 +617,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->first_skip = first_skip;
 	rq->later_skip = sizeof(struct rte_mbuf);
 	rq->lpb_size = mp->elt_size;
-	rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
+	if (roc_errata_nix_no_meta_aura())
+		rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
 
 	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
 	if (roc_nix_inl_inb_is_enabled(nix))
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
                     ` (23 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Similar to other RQs, delay the inline device RQ enable until the device is
started, to avoid traffic reception while the device is stopped.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.h    |  2 --
 drivers/common/cnxk/roc_nix_inl.c | 34 ++++++++++++++++++++++++++++---
 drivers/common/cnxk/roc_nix_inl.h |  5 ++++-
 drivers/common/cnxk/version.map   |  7 ++++---
 drivers/net/cnxk/cnxk_ethdev.c    | 14 ++++++++++++-
 5 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 7e0beed495..16793c2828 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -17,6 +17,4 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 
-uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
-
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 603551bf83..c621867e54 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -245,6 +245,9 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	struct roc_cpt *roc_cpt;
 	struct roc_cpt_rxc_time_cfg cfg;
 
+	if (!idev)
+		return -EFAULT;
+
 	PLT_SET_USED(max_frags);
 	if (idev == NULL)
 		return -ENOTSUP;
@@ -587,7 +590,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 }
 
 int
-roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
+roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
@@ -688,9 +691,9 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 
 	/* Prepare and send RQ init mbox */
 	if (roc_model_is_cn9k())
-		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cn9k_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	else
-		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, true);
+		rc = nix_rq_cfg(dev, inl_rq, inl_dev->qints, false, enable);
 	if (rc) {
 		plt_err("Failed to prepare aq_enq msg, rc=%d", rc);
 		return rc;
@@ -755,6 +758,31 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	return rc;
 }
 
+int
+roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_rq *inl_rq = roc_nix_inl_dev_rq(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	int rc;
+
+	if (!idev)
+		return -EFAULT;
+
+	if (nix->inb_inl_dev) {
+		if (!inl_rq || !idev->nix_inl_dev)
+			return -EFAULT;
+
+		inl_dev = idev->nix_inl_dev;
+
+		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
 void
 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index c7b1817d7b..702ec01384 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -165,7 +165,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
-int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
+int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
 struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
@@ -175,6 +175,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 					   uint16_t max_frags);
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
+int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -189,6 +190,8 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 					void *args);
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
+uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
 	ROC_NIX_INL_SA_OP_FLUSH,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a2d99e1f4a..6d43e37d1e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -90,7 +90,6 @@ INTERNAL {
 	roc_hash_sha512_gen;
 	roc_idev_cpt_get;
 	roc_idev_cpt_set;
-	roc_nix_inl_outb_ring_base_get;
 	roc_idev_lmt_base_addr_get;
 	roc_idev_npa_maxpools_get;
 	roc_idev_npa_maxpools_set;
@@ -137,11 +136,13 @@ INTERNAL {
 	roc_nix_get_vwqe_interval;
 	roc_nix_inl_cb_register;
 	roc_nix_inl_cb_unregister;
+	roc_nix_inl_ctx_write;
 	roc_nix_inl_dev_dump;
 	roc_nix_inl_dev_fini;
 	roc_nix_inl_dev_init;
 	roc_nix_inl_dev_is_probed;
 	roc_nix_inl_dev_lock;
+	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_dev_rq;
 	roc_nix_inl_dev_rq_get;
 	roc_nix_inl_dev_rq_put;
@@ -163,11 +164,11 @@ INTERNAL {
 	roc_nix_inl_outb_sa_base_get;
 	roc_nix_inl_outb_sso_pffunc_get;
 	roc_nix_inl_outb_is_enabled;
+	roc_nix_inl_outb_ring_base_get;
 	roc_nix_inl_outb_soft_exp_poll_switch;
+	roc_nix_inl_rq_ena_dis;
 	roc_nix_inl_sa_sync;
 	roc_nix_inl_ts_pkind_set;
-	roc_nix_inl_ctx_write;
-	roc_nix_inl_dev_pffunc_get;
 	roc_nix_inl_outb_cpt_lfs_dump;
 	roc_nix_cpt_ctx_cache_sync;
 	roc_nix_is_lbk;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index d90baabc4d..80ab3cfedd 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -660,7 +660,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
 
 		/* Setup rq reference for inline dev if present */
-		rc = roc_nix_inl_dev_rq_get(rq);
+		rc = roc_nix_inl_dev_rq_get(rq, !!eth_dev->data->dev_started);
 		if (rc)
 			goto free_mem;
 	}
@@ -1482,6 +1482,10 @@ cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
 
 	roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
 
+	/* Stop inline device RQ first */
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+		roc_nix_inl_rq_ena_dis(&dev->nix, false);
+
 	/* Stop rx queues and free up pkts pending */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rc = dev_ops->rx_queue_stop(eth_dev, i);
@@ -1527,6 +1531,14 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
 			return rc;
 	}
 
+	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		rc = roc_nix_inl_rq_ena_dis(&dev->nix, true);
+		if (rc) {
+			plt_err("Failed to enable Inline device RQ, rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Start tx queues  */
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		rc = cnxk_nix_tx_queue_start(eth_dev, i);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 10/32] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
                     ` (22 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella, Ashwin Sekhar T K, Pavan Nikhilesh
  Cc: jerinj, dev

Reserve aura id 0 on cn10k and provide mechanism to
specifically allocate it and free it via roc_npa_*
API's.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_dpi.c           |   2 +-
 drivers/common/cnxk/roc_nix_queue.c     |   2 +-
 drivers/common/cnxk/roc_npa.c           | 100 +++++++++++++++++++-----
 drivers/common/cnxk/roc_npa.h           |   6 +-
 drivers/common/cnxk/roc_npa_priv.h      |   1 +
 drivers/common/cnxk/roc_sso.c           |   2 +-
 drivers/common/cnxk/version.map         |   1 +
 drivers/mempool/cnxk/cnxk_mempool_ops.c |   7 +-
 8 files changed, 97 insertions(+), 24 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index 23b2cc41a4..93c8318a3d 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -75,7 +75,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi)
 
 	memset(&aura, 0, sizeof(aura));
 	rc = roc_npa_pool_create(&aura_handle, DPI_CMD_QUEUE_SIZE,
-				 DPI_CMD_QUEUE_BUFS, &aura, &pool);
+				 DPI_CMD_QUEUE_BUFS, &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create NPA pool, err %d\n", rc);
 		return rc;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 692b13415a..70b4516eca 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -713,7 +713,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
-				 &pool);
+				 &pool, 0);
 	if (rc)
 		goto fail;
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 1e60f443f0..760a2315b2 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -260,16 +260,60 @@ bitmap_ctzll(uint64_t slab)
 	return __builtin_ctzll(slab);
 }
 
+static int
+find_free_aura(struct npa_lf *lf, uint32_t flags)
+{
+	struct plt_bitmap *bmp = lf->npa_bmp;
+	uint64_t aura0_state = 0;
+	uint64_t slab;
+	uint32_t pos;
+	int idx = -1;
+	int rc;
+
+	if (flags & ROC_NPA_ZERO_AURA_F) {
+		/* Only look for zero aura */
+		if (plt_bitmap_get(bmp, 0))
+			return 0;
+		plt_err("Zero aura already in use");
+		return -1;
+	}
+
+	if (lf->zero_aura_rsvd) {
+		/* Save and clear zero aura bit if needed */
+		aura0_state = plt_bitmap_get(bmp, 0);
+		if (aura0_state)
+			plt_bitmap_clear(bmp, 0);
+	}
+
+	pos = 0;
+	slab = 0;
+	/* Scan from the beginning */
+	plt_bitmap_scan_init(bmp);
+	/* Scan bitmap to get the free pool */
+	rc = plt_bitmap_scan(bmp, &pos, &slab);
+	/* Empty bitmap */
+	if (rc == 0) {
+		plt_err("Aura's exhausted");
+		goto empty;
+	}
+
+	idx = pos + bitmap_ctzll(slab);
+empty:
+	if (lf->zero_aura_rsvd && aura0_state)
+		plt_bitmap_set(bmp, 0);
+
+	return idx;
+}
+
 static int
 npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 			 const uint32_t block_count, struct npa_aura_s *aura,
-			 struct npa_pool_s *pool, uint64_t *aura_handle)
+			 struct npa_pool_s *pool, uint64_t *aura_handle,
+			 uint32_t flags)
 {
 	int rc, aura_id, pool_id, stack_size, alloc_size;
 	char name[PLT_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
-	uint64_t slab;
-	uint32_t pos;
 
 	/* Sanity check */
 	if (!lf || !block_size || !block_count || !pool || !aura ||
@@ -281,20 +325,11 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	    block_size > ROC_NPA_MAX_BLOCK_SZ)
 		return NPA_ERR_INVALID_BLOCK_SZ;
 
-	pos = 0;
-	slab = 0;
-	/* Scan from the beginning */
-	plt_bitmap_scan_init(lf->npa_bmp);
-	/* Scan bitmap to get the free pool */
-	rc = plt_bitmap_scan(lf->npa_bmp, &pos, &slab);
-	/* Empty bitmap */
-	if (rc == 0) {
-		plt_err("Mempools exhausted");
-		return NPA_ERR_AURA_ID_ALLOC;
-	}
-
 	/* Get aura_id from resource bitmap */
-	aura_id = pos + bitmap_ctzll(slab);
+	aura_id = find_free_aura(lf, flags);
+	if (aura_id < 0)
+		return NPA_ERR_AURA_ID_ALLOC;
+
 	/* Mark pool as reserved */
 	plt_bitmap_clear(lf->npa_bmp, aura_id);
 
@@ -374,7 +409,7 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 int
 roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		    uint32_t block_count, struct npa_aura_s *aura,
-		    struct npa_pool_s *pool)
+		    struct npa_pool_s *pool, uint32_t flags)
 {
 	struct npa_aura_s defaura;
 	struct npa_pool_s defpool;
@@ -394,6 +429,11 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 		goto error;
 	}
 
+	if (flags & ROC_NPA_ZERO_AURA_F && !lf->zero_aura_rsvd) {
+		rc = NPA_ERR_ALLOC;
+		goto error;
+	}
+
 	if (aura == NULL) {
 		memset(&defaura, 0, sizeof(struct npa_aura_s));
 		aura = &defaura;
@@ -406,7 +446,7 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 	}
 
 	rc = npa_aura_pool_pair_alloc(lf, block_size, block_count, aura, pool,
-				      aura_handle);
+				      aura_handle, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
@@ -522,6 +562,26 @@ roc_npa_pool_range_update_check(uint64_t aura_handle)
 	return 0;
 }
 
+uint64_t
+roc_npa_zero_aura_handle(void)
+{
+	struct idev_cfg *idev;
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_DEVICE_NOT_BOUNDED;
+
+	idev = idev_get_cfg();
+	if (idev == NULL)
+		return NPA_ERR_ALLOC;
+
+	/* Return aura handle only if reserved */
+	if (lf->zero_aura_rsvd)
+		return roc_npa_aura_handle_gen(0, lf->base);
+	return 0;
+}
+
 static inline int
 npa_attach(struct mbox *mbox)
 {
@@ -672,6 +732,10 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 	for (i = 0; i < nr_pools; i++)
 		plt_bitmap_set(lf->npa_bmp, i);
 
+	/* Reserve zero aura for all models other than CN9K */
+	if (!roc_model_is_cn9k())
+		lf->zero_aura_rsvd = true;
+
 	/* Allocate memory for qint context */
 	lf->npa_qint_mem = plt_zmalloc(sizeof(struct npa_qint) * nr_pools, 0);
 	if (lf->npa_qint_mem == NULL) {
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 59d13d88a1..69129cb4cc 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -711,10 +711,13 @@ struct roc_npa {
 int __roc_api roc_npa_dev_init(struct roc_npa *roc_npa);
 int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 
+/* Flags to pool create */
+#define ROC_NPA_ZERO_AURA_F BIT(0)
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
-				  struct npa_pool_s *pool);
+				  struct npa_pool_s *pool, uint32_t flags);
 int __roc_api roc_npa_aura_limit_modify(uint64_t aura_handle,
 					uint16_t aura_limit);
 int __roc_api roc_npa_pool_destroy(uint64_t aura_handle);
@@ -722,6 +725,7 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t aura_handle);
 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
+uint64_t __roc_api roc_npa_zero_aura_handle(void);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index 5a02a61e00..de3d5448ba 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -32,6 +32,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
+	bool zero_aura_rsvd;
 };
 
 struct npa_qint {
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 126a9cba99..4bee5a97e1 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -473,7 +473,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
 	aura.fc_addr = (uint64_t)xaq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
 	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
-				 &aura, &pool);
+				 &aura, &pool, 0);
 	if (rc) {
 		plt_err("Failed to create XAQ pool");
 		goto npa_fail;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6d43e37d1e..6c05e893e3 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -318,6 +318,7 @@ INTERNAL {
 	roc_npa_pool_destroy;
 	roc_npa_pool_op_pc_reset;
 	roc_npa_pool_range_update_check;
+	roc_npa_zero_aura_handle;
 	roc_npc_fini;
 	roc_npc_flow_create;
 	roc_npc_flow_destroy;
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index c7b75f026d..a0b94bb95c 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,10 +72,10 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
 int
 cnxk_mempool_alloc(struct rte_mempool *mp)
 {
+	uint32_t block_count, flags = 0;
 	uint64_t aura_handle = 0;
 	struct npa_aura_s aura;
 	struct npa_pool_s pool;
-	uint32_t block_count;
 	size_t block_size;
 	int rc = -ERANGE;
 
@@ -100,8 +100,11 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
 	if (mp->pool_config != NULL)
 		memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
 
+	if (aura.ena && aura.pool_addr == 0)
+		flags = ROC_NPA_ZERO_AURA_F;
+
 	rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
-				 &pool);
+				 &pool, flags);
 	if (rc) {
 		plt_err("Failed to alloc pool or aura rc=%d", rc);
 		goto error;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 10/32] common/cnxk: add support to set NPA buf type
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
                     ` (21 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to set/get a per-aura buf type with reference
counts, and to get the sum of all aura limits matching a
given buf type mask and value.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/meson.build    |  1 +
 drivers/common/cnxk/roc_npa.c      | 11 ++++
 drivers/common/cnxk/roc_npa.h      | 22 +++++++
 drivers/common/cnxk/roc_npa_priv.h |  8 ++-
 drivers/common/cnxk/roc_npa_type.c | 99 ++++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map    |  3 +
 6 files changed, 143 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/cnxk/roc_npa_type.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 6f808271d1..127fcbcdc5 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -51,6 +51,7 @@ sources = files(
         'roc_npa.c',
         'roc_npa_debug.c',
         'roc_npa_irq.c',
+        'roc_npa_type.c',
         'roc_npc.c',
         'roc_npc_mcam.c',
         'roc_npc_mcam_dump.c',
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 760a2315b2..ee42434c38 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -499,6 +499,7 @@ npa_aura_pool_pair_free(struct npa_lf *lf, uint64_t aura_handle)
 	pool_id = aura_id;
 	rc = npa_aura_pool_fini(lf->mbox, aura_id, aura_handle);
 	rc |= npa_stack_dma_free(lf, name, pool_id);
+	memset(&lf->aura_attr[aura_id], 0, sizeof(struct npa_aura_attr));
 
 	plt_bitmap_set(lf->npa_bmp, aura_id);
 
@@ -750,6 +751,13 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 		goto qint_free;
 	}
 
+	/* Allocate per-aura attribute */
+	lf->aura_attr = plt_zmalloc(sizeof(struct npa_aura_attr) * nr_pools, 0);
+	if (lf->aura_attr == NULL) {
+		rc = NPA_ERR_PARAM;
+		goto lim_free;
+	}
+
 	/* Init aura start & end limits */
 	for (i = 0; i < nr_pools; i++) {
 		lf->aura_lim[i].ptr_start = UINT64_MAX;
@@ -758,6 +766,8 @@ npa_dev_init(struct npa_lf *lf, uintptr_t base, struct mbox *mbox)
 
 	return 0;
 
+lim_free:
+	plt_free(lf->aura_lim);
 qint_free:
 	plt_free(lf->npa_qint_mem);
 bmap_free:
@@ -780,6 +790,7 @@ npa_dev_fini(struct npa_lf *lf)
 	plt_free(lf->npa_qint_mem);
 	plt_bitmap_free(lf->npa_bmp);
 	plt_free(lf->npa_bmp_mem);
+	plt_free(lf->aura_attr);
 
 	return npa_lf_free(lf->mbox);
 }
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 69129cb4cc..fed1942404 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -714,6 +714,25 @@ int __roc_api roc_npa_dev_fini(struct roc_npa *roc_npa);
 /* Flags to pool create */
 #define ROC_NPA_ZERO_AURA_F BIT(0)
 
+/* Enumerations */
+enum roc_npa_buf_type {
+	/* Aura used for normal pkts */
+	ROC_NPA_BUF_TYPE_PACKET = 0,
+	/* Aura used for ipsec pkts */
+	ROC_NPA_BUF_TYPE_PACKET_IPSEC,
+	/* Aura used as vwqe for normal pkts */
+	ROC_NPA_BUF_TYPE_VWQE,
+	/* Aura used as vwqe for ipsec pkts */
+	ROC_NPA_BUF_TYPE_VWQE_IPSEC,
+	/* Aura used as SQB for SQ */
+	ROC_NPA_BUF_TYPE_SQB,
+	/* Aura used for general buffer */
+	ROC_NPA_BUF_TYPE_BUF,
+	/* Aura used for timeout pool */
+	ROC_NPA_BUF_TYPE_TIMEOUT,
+	ROC_NPA_BUF_TYPE_END,
+};
+
 /* NPA pool */
 int __roc_api roc_npa_pool_create(uint64_t *aura_handle, uint32_t block_size,
 				  uint32_t block_count, struct npa_aura_s *aura,
@@ -726,6 +745,9 @@ void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 					 uint64_t start_iova,
 					 uint64_t end_iova);
 uint64_t __roc_api roc_npa_zero_aura_handle(void);
+int __roc_api roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int cnt);
+uint64_t __roc_api roc_npa_buf_type_mask(uint64_t aura_handle);
+uint64_t __roc_api roc_npa_buf_type_limit_get(uint64_t type_mask);
 
 /* Init callbacks */
 typedef int (*roc_npa_lf_init_cb_t)(struct plt_pci_device *pci_dev);
diff --git a/drivers/common/cnxk/roc_npa_priv.h b/drivers/common/cnxk/roc_npa_priv.h
index de3d5448ba..d2118cc4fb 100644
--- a/drivers/common/cnxk/roc_npa_priv.h
+++ b/drivers/common/cnxk/roc_npa_priv.h
@@ -18,6 +18,7 @@ enum npa_error_status {
 
 struct npa_lf {
 	struct plt_intr_handle *intr_handle;
+	struct npa_aura_attr *aura_attr;
 	struct npa_aura_lim *aura_lim;
 	struct plt_pci_device *pci_dev;
 	struct plt_bitmap *npa_bmp;
@@ -25,6 +26,7 @@ struct npa_lf {
 	uint32_t stack_pg_ptrs;
 	uint32_t stack_pg_bytes;
 	uint16_t npa_msixoff;
+	bool zero_aura_rsvd;
 	void *npa_qint_mem;
 	void *npa_bmp_mem;
 	uint32_t nr_pools;
@@ -32,7 +34,7 @@ struct npa_lf {
 	uint8_t aura_sz;
 	uint32_t qints;
 	uintptr_t base;
-	bool zero_aura_rsvd;
+
 };
 
 struct npa_qint {
@@ -45,6 +47,10 @@ struct npa_aura_lim {
 	uint64_t ptr_end;
 };
 
+struct npa_aura_attr {
+	int buf_type[ROC_NPA_BUF_TYPE_END];
+};
+
 struct dev;
 
 static inline struct npa *
diff --git a/drivers/common/cnxk/roc_npa_type.c b/drivers/common/cnxk/roc_npa_type.c
new file mode 100644
index 0000000000..ed90138944
--- /dev/null
+++ b/drivers/common/cnxk/roc_npa_type.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+int
+roc_npa_buf_type_update(uint64_t aura_handle, enum roc_npa_buf_type type, int count)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	struct npa_lf *lf;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools)
+		return NPA_ERR_PARAM;
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot set buf type on unused aura");
+		return NPA_ERR_PARAM;
+	}
+
+	if (type >= ROC_NPA_BUF_TYPE_END || (lf->aura_attr[aura_id].buf_type[type] + count < 0)) {
+		plt_err("Pool buf type invalid");
+		return NPA_ERR_PARAM;
+	}
+
+	lf->aura_attr[aura_id].buf_type[type] += count;
+	plt_wmb();
+	return 0;
+}
+
+uint64_t
+roc_npa_buf_type_mask(uint64_t aura_handle)
+{
+	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+	uint64_t type_mask = 0;
+	struct npa_lf *lf;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL || aura_id >= lf->nr_pools) {
+		plt_err("Invalid aura id or lf");
+		return 0;
+	}
+
+	if (plt_bitmap_get(lf->npa_bmp, aura_id)) {
+		plt_err("Cannot get buf_type on unused aura");
+		return 0;
+	}
+
+	for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+		if (lf->aura_attr[aura_id].buf_type[type])
+			type_mask |= BIT_ULL(type);
+	}
+
+	return type_mask;
+}
+
+uint64_t
+roc_npa_buf_type_limit_get(uint64_t type_mask)
+{
+	uint64_t wdata, reg;
+	uint64_t limit = 0;
+	struct npa_lf *lf;
+	uint64_t aura_id;
+	int64_t *addr;
+	uint64_t val;
+	int type;
+
+	lf = idev_npa_obj_get();
+	if (lf == NULL)
+		return NPA_ERR_PARAM;
+
+	for (aura_id = 0; aura_id < lf->nr_pools; aura_id++) {
+		if (plt_bitmap_get(lf->npa_bmp, aura_id))
+			continue;
+
+		/* Find aura's matching the buf_types requested */
+		if (type_mask != 0) {
+			val = 0;
+			for (type = 0; type < ROC_NPA_BUF_TYPE_END; type++) {
+				if (lf->aura_attr[aura_id].buf_type[type] != 0)
+					val |= BIT_ULL(type);
+			}
+			if ((val & type_mask) == 0)
+				continue;
+		}
+
+		wdata = aura_id << 44;
+		addr = (int64_t *)(lf->base + NPA_LF_AURA_OP_LIMIT);
+		reg = roc_atomic64_add_nosync(wdata, addr);
+
+		if (!(reg & BIT_ULL(42)))
+			limit += (reg & ROC_AURA_OP_LIMIT_MASK);
+	}
+
+	return limit;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6c05e893e3..6f3de2ab59 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -306,6 +306,9 @@ INTERNAL {
 	roc_nix_vlan_mcam_entry_write;
 	roc_nix_vlan_strip_vtag_ena_dis;
 	roc_nix_vlan_tpid_set;
+	roc_npa_buf_type_mask;
+	roc_npa_buf_type_limit_get;
+	roc_npa_buf_type_update;
 	roc_npa_aura_drop_set;
 	roc_npa_aura_limit_modify;
 	roc_npa_aura_op_range_set;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 10/32] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
                     ` (20 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Update attributes to pools used by NIX so that we
can later identify which mempools are packet pools
and which are used for Inline IPsec enabled ethdev.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_queue.c | 112 +++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 70b4516eca..98b9fb45f5 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -140,6 +140,96 @@ roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
 	return sso_enable ? true : false;
 }
 
+static int
+nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
+{
+	struct roc_nix *roc_nix = rq->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
+	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
+	struct mbox *mbox = nix->dev.mbox;
+	uint64_t aura_base;
+	int rc, count;
+
+	count = set ? 1 : -1;
+	/* For buf type set, use info from RQ context */
+	if (set) {
+		lpb_aura = rq->aura_handle;
+		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
+		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
+		goto skip_ctx_read;
+	}
+
+	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_rsp *rsp;
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+	} else {
+		struct nix_cn10k_aq_enq_rsp *rsp;
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Get aura handle from aura */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+		if (rsp->rq.vwqe_ena)
+			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);
+	}
+
+skip_ctx_read:
+	/* Update attributes for LPB aura */
+	if (inl_inb_ena)
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+	else
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+
+	/* Update attributes for SPB aura */
+	if (spb_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+		else
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+	}
+
+	/* Update attributes for VWQE aura */
+	if (vwqe_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
+		else
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
+	}
+
+	return 0;
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		bool cfg, bool ena)
@@ -292,7 +382,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
-			aq->rq.wqe_aura = rq->vwqe_aura_handle;
+			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
 		}
 	} else {
 		/* CQ mode */
@@ -463,6 +553,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura buf type to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -481,6 +574,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rq->qid >= nix->nb_rx_queues)
 		return NIX_ERR_QUEUE_INVALID_RANGE;
 
+	/* Clear attributes for existing aura's */
+	nix_rq_aura_buf_type_update(rq, false);
+
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
@@ -495,14 +591,25 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
+	int rc;
+
 	/* Disabling RQ is sufficient */
-	return roc_nix_rq_ena_dis(rq, false);
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc)
+		return rc;
+
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, false);
+	return 0;
 }
 
 int
@@ -717,6 +824,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	if (rc)
 		goto fail;
 
+	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
 	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
                     ` (19 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: jerinj, dev

Add support to create aura zero for inline inbound meta pkts when the
platform supports it. Aura zero will hold as many buffers as all the
available pkt pools combined, each with enough data room to accommodate
384B in the best case, to store meta packets coming from Inline IPsec.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_idev.c         |  10 ++
 drivers/common/cnxk/roc_idev.h         |   1 +
 drivers/common/cnxk/roc_idev_priv.h    |   9 ++
 drivers/common/cnxk/roc_nix.h          |   1 +
 drivers/common/cnxk/roc_nix_inl.c      | 211 +++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl.h      |   8 +
 drivers/common/cnxk/roc_nix_inl_dev.c  |   2 +
 drivers/common/cnxk/roc_nix_inl_priv.h |   4 +
 drivers/common/cnxk/roc_nix_priv.h     |   1 +
 drivers/common/cnxk/roc_nix_queue.c    |  19 +++
 drivers/common/cnxk/version.map        |   4 +
 11 files changed, 270 insertions(+)

diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index a08c7ce8fd..4d2eff93ce 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -241,3 +241,13 @@ idev_sso_set(struct roc_sso *sso)
 	if (idev != NULL)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
+
+uint64_t
+roc_idev_nix_inl_meta_aura_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->inl_cfg.meta_aura;
+	return 0;
+}
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 16793c2828..926aac0634 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -16,5 +16,6 @@ struct roc_cpt *__roc_api roc_idev_cpt_get(void);
 void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
+uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
 
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 46eebffcbb..315cc6f52c 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -10,6 +10,14 @@ struct npa_lf;
 struct roc_bphy;
 struct roc_cpt;
 struct nix_inl_dev;
+
+struct idev_nix_inl_cfg {
+	uint64_t meta_aura;
+	uint32_t nb_bufs;
+	uint32_t buf_sz;
+	uint32_t refs;
+};
+
 struct idev_cfg {
 	uint16_t sso_pf_func;
 	uint16_t npa_pf_func;
@@ -23,6 +31,7 @@ struct idev_cfg {
 	struct roc_cpt *cpt;
 	struct roc_sso *sso;
 	struct nix_inl_dev *nix_inl_dev;
+	struct idev_nix_inl_cfg inl_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 };
 
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index c9aaedc915..77e4d2919b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -321,6 +321,7 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
+	uint64_t meta_aura_handle;
 	uint16_t inl_dev_refs;
 };
 
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index c621867e54..507a15315a 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -6,6 +6,7 @@
 #include "roc_priv.h"
 
 uint32_t soft_exp_consumer_cnt;
+roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
 PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
@@ -18,6 +19,155 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
 
+static int
+nix_inl_meta_aura_destroy(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	int rc;
+
+	if (!idev)
+		return -EINVAL;
+
+	inl_cfg = &idev->inl_cfg;
+	/* Destroy existing Meta aura */
+	if (inl_cfg->meta_aura) {
+		uint64_t avail, limit;
+
+		/* Check if all buffers are back to pool */
+		avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
+		limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
+		if (avail != limit)
+			plt_warn("Not all buffers are back to meta pool,"
+				 " %" PRIu64 " != %" PRIu64, avail, limit);
+
+		rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
+		if (rc) {
+			plt_err("Failed to destroy meta aura, rc=%d", rc);
+			return rc;
+		}
+		inl_cfg->meta_aura = 0;
+		inl_cfg->buf_sz = 0;
+		inl_cfg->nb_bufs = 0;
+		inl_cfg->refs = 0;
+	}
+
+	return 0;
+}
+
+static int
+nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
+{
+	uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	struct idev_nix_inl_cfg *inl_cfg;
+	struct nix_inl_dev *nix_inl_dev;
+	uint32_t nb_bufs, buf_sz;
+	int rc;
+
+	inl_cfg = &idev->inl_cfg;
+	nix_inl_dev = idev->nix_inl_dev;
+
+	/* Override meta buf count from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+		nb_bufs = nix_inl_dev->nb_meta_bufs;
+	else
+		nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+	/* Override meta buf size from devargs if present */
+	if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+		buf_sz = nix_inl_dev->meta_buf_sz;
+	else
+		buf_sz = first_skip + NIX_INL_META_SIZE;
+
+	/* Allocate meta aura */
+	rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
+	if (rc) {
+		plt_err("Failed to allocate meta aura, rc=%d", rc);
+		return rc;
+	}
+
+	inl_cfg->buf_sz = buf_sz;
+	inl_cfg->nb_bufs = nb_bufs;
+	return 0;
+}
+
+int
+roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint32_t actual, expected;
+	uint64_t mask, type_mask;
+	int rc;
+
+	if (!idev || !meta_pool_cb)
+		return -EFAULT;
+	inl_cfg = &idev->inl_cfg;
+
+	/* Create meta aura if not present */
+	if (!inl_cfg->meta_aura) {
+		rc = nix_inl_meta_aura_create(idev, rq->first_skip);
+		if (rc)
+			return rc;
+	}
+
+	/* Validate if we have enough meta buffers */
+	mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+	expected = roc_npa_buf_type_limit_get(mask);
+	actual = inl_cfg->nb_bufs;
+
+	if (actual < expected) {
+		plt_err("Insufficient buffers in meta aura %u < %u (expected)",
+			actual, expected);
+		return -EIO;
+	}
+
+	/* Validate if we have enough space for meta buffer */
+	if (rq->first_skip + NIX_INL_META_SIZE > inl_cfg->buf_sz) {
+		plt_err("Meta buffer size %u not sufficient to meet RQ first skip %u",
+			inl_cfg->buf_sz, rq->first_skip);
+		return -EIO;
+	}
+
+	/* Validate if we have enough VWQE buffers */
+	if (rq->vwqe_ena) {
+		actual = roc_npa_aura_op_limit_get(rq->vwqe_aura_handle);
+
+		type_mask = roc_npa_buf_type_mask(rq->vwqe_aura_handle);
+		if (type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE_IPSEC) &&
+		    type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE)) {
+			/* VWQE aura shared b/w Inline enabled and non Inline
+			 * enabled ports needs enough buffers to store all the
+			 * packet buffers, one per vwqe.
+			 */
+			mask = (BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC) |
+				BIT_ULL(ROC_NPA_BUF_TYPE_PACKET));
+			expected = roc_npa_buf_type_limit_get(mask);
+
+			if (actual < expected) {
+				plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
+					"ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		} else {
+			/* VWQE aura not shared b/w Inline and non Inline ports have relaxed
+			 * requirement of match all the meta buffers.
+			 */
+			expected = inl_cfg->nb_bufs;
+
+			if (actual < expected) {
+				plt_err("VWQE aura not shared b/w Inline inbound and non-Inline "
+					"ports needs vwqe bufs(%u) minimum of all meta bufs (%u)",
+					actual, expected);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int
 nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 {
@@ -310,6 +460,10 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	if (rc)
 		return rc;
 
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		nix->need_meta_aura = true;
+		idev->inl_cfg.refs++;
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -317,12 +471,22 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 int
 roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
+	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 
 	if (!nix->inl_inb_ena)
 		return 0;
 
+	if (!idev)
+		return -EFAULT;
+
 	nix->inl_inb_ena = false;
+	if (nix->need_meta_aura) {
+		nix->need_meta_aura = false;
+		idev->inl_cfg.refs--;
+		if (!idev->inl_cfg.refs)
+			nix_inl_meta_aura_destroy();
+	}
 
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
@@ -592,6 +756,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
 int
 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 {
+	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	int port_id = rq->roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
@@ -603,6 +768,10 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 	if (idev == NULL)
 		return 0;
 
+	/* Update meta aura handle in RQ */
+	if (nix->need_meta_aura)
+		rq->meta_aura_handle = roc_npa_zero_aura_handle();
+
 	inl_dev = idev->nix_inl_dev;
 	/* Nothing to do if no inline device */
 	if (!inl_dev)
@@ -705,6 +874,13 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 		return rc;
 	}
 
+	/* Check meta aura */
+	if (enable && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	inl_rq->inl_dev_refs++;
 	rq->inl_dev_refs = 1;
 	return 0;
@@ -724,6 +900,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	if (idev == NULL)
 		return 0;
 
+	rq->meta_aura_handle = 0;
 	if (!rq->inl_dev_refs)
 		return 0;
 
@@ -779,6 +956,9 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
 		rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
 		if (rc)
 			return rc;
+
+		if (enable && nix->need_meta_aura)
+			return roc_nix_inl_meta_aura_check(inl_rq);
 	}
 	return 0;
 }
@@ -792,6 +972,31 @@ roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 	nix->inb_inl_dev = use_inl_dev;
 }
 
+void
+roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (!idev)
+		return;
+	/* Need to set here for cases when inbound SA table is
+	 * managed outside RoC.
+	 */
+	nix->inl_inb_ena = ena;
+	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+		if (ena) {
+			nix->need_meta_aura = true;
+			idev->inl_cfg.refs++;
+		} else if (nix->need_meta_aura) {
+			nix->need_meta_aura = false;
+			idev->inl_cfg.refs--;
+			if (!idev->inl_cfg.refs)
+				nix_inl_meta_aura_destroy();
+		}
+	}
+}
+
 int
 roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
 {
@@ -1128,3 +1333,9 @@ roc_nix_inl_dev_unlock(void)
 	if (idev != NULL)
 		plt_spinlock_unlock(&idev->nix_inl_dev_lock);
 }
+
+void
+roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
+{
+	meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 702ec01384..9911a48b2d 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
 
+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
+					  bool destroy);
+
 struct roc_nix_inl_dev {
 	/* Input parameters */
 	struct plt_pci_device *pci_dev;
@@ -135,6 +138,8 @@ struct roc_nix_inl_dev {
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
 	bool set_soft_exp_poll;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 	/* End of input parameters */
 
 #define ROC_NIX_INL_MEM_SZ (1280)
@@ -165,6 +170,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
 uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
 					   bool inl_dev_sa, uint32_t spi);
 void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
+void __roc_api roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena);
 int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
@@ -176,6 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
 int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -191,6 +198,7 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
 int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
 						    bool poll);
 uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
 
 /* NIX Inline/Outbound API */
 enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 3a96498d64..1e9b2b95d7 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -841,6 +841,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
 	inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
 	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
+	inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
+	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
 
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index a775efc637..ccd2adf982 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,6 +6,8 @@
 #include <pthread.h>
 #include <sys/types.h>
 
+#define NIX_INL_META_SIZE 384u
+
 struct nix_inl_dev;
 struct nix_inl_qint {
 	struct nix_inl_dev *inl_dev;
@@ -86,6 +88,8 @@ struct nix_inl_dev {
 	bool attach_cptlf;
 	uint16_t wqe_skip;
 	bool ts_ena;
+	uint32_t nb_meta_bufs;
+	uint32_t meta_buf_sz;
 };
 
 int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a3d4ddf5d5..a253f412de 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -202,6 +202,7 @@ struct nix {
 	uint16_t nb_cpt_lf;
 	uint16_t outb_se_ring_cnt;
 	uint16_t outb_se_ring_base;
+	bool need_meta_aura;
 	/* Mode provided by driver */
 	bool inb_inl_dev;
 
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 98b9fb45f5..b197de0a77 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -89,7 +89,12 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 
 	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
 	nix_rq_vwqe_flush(rq, nix->vwqe_interval);
+	if (rc)
+		return rc;
 
+	/* Check for meta aura if RQ is enabled */
+	if (enable && nix->need_meta_aura)
+		rc = roc_nix_inl_meta_aura_check(rq);
 	return rc;
 }
 
@@ -556,6 +561,13 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura buf type to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -594,6 +606,13 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	/* Update aura attribute to indicate its use */
 	nix_rq_aura_buf_type_update(rq, true);
 
+	/* Check for meta aura if RQ is enabled */
+	if (ena && nix->need_meta_aura) {
+		rc = roc_nix_inl_meta_aura_check(rq);
+		if (rc)
+			return rc;
+	}
+
 	return nix_tel_node_add_rq(rq);
 }
 
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6f3de2ab59..276fec3660 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -95,6 +95,7 @@ INTERNAL {
 	roc_idev_npa_maxpools_set;
 	roc_idev_npa_nix_get;
 	roc_idev_num_lmtlines_get;
+	roc_idev_nix_inl_meta_aura_get;
 	roc_model;
 	roc_se_auth_key_set;
 	roc_se_ciph_key_set;
@@ -156,7 +157,10 @@ INTERNAL {
 	roc_nix_inl_inb_sa_sz;
 	roc_nix_inl_inb_tag_update;
 	roc_nix_inl_inb_fini;
+	roc_nix_inl_inb_set;
 	roc_nix_inb_is_with_inl_dev;
+	roc_nix_inl_meta_aura_check;
+	roc_nix_inl_meta_pool_cb_register;
 	roc_nix_inb_mode_set;
 	roc_nix_inl_outb_fini;
 	roc_nix_inl_outb_init;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
                     ` (18 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev

Add support for zero aura for inline meta pkts and register
callback to ROC to create meta pool via mempool. Also
add devargs to override meta buffer count and size.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c      |  8 +-
 drivers/event/cnxk/cn10k_worker.h        | 32 ++++----
 drivers/event/cnxk/cnxk_eventdev.h       |  1 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  2 +-
 drivers/net/cnxk/cn10k_ethdev.c          |  8 +-
 drivers/net/cnxk/cn10k_ethdev.h          |  2 +-
 drivers/net/cnxk/cn10k_rx.h              | 35 +++++----
 drivers/net/cnxk/cnxk_ethdev.c           |  3 +
 drivers/net/cnxk/cnxk_ethdev.h           |  2 +
 drivers/net/cnxk/cnxk_ethdev_sec.c       | 97 +++++++++++++++++++++++-
 10 files changed, 154 insertions(+), 36 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index fee01713b4..1774455b4c 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,7 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	int i;
@@ -703,6 +703,8 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 		struct cn10k_sso_hws *ws = event_dev->data->ports[i];
 		ws->lookup_mem = lookup_mem;
 		ws->tstamp = dev->tstamp;
+		if (meta_aura)
+			ws->meta_aura = meta_aura;
 	}
 }
 
@@ -713,6 +715,7 @@ cn10k_sso_rx_adapter_queue_add(
 	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
 	struct cn10k_eth_rxq *rxq;
+	uint64_t meta_aura;
 	void *lookup_mem;
 	int rc;
 
@@ -726,7 +729,8 @@ cn10k_sso_rx_adapter_queue_add(
 		return -EINVAL;
 	rxq = eth_dev->data->rx_queues[0];
 	lookup_mem = rxq->lookup_mem;
-	cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+	meta_aura = rxq->meta_aura;
+	cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
 	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
 	return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index db56d96404..47ce423da2 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -127,12 +127,14 @@ cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
 }
 
 static __rte_always_inline void
-cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
-		   void *lookup_mem, void *tstamp, uintptr_t lbase)
+cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struct cn10k_sso_hws *ws)
 {
 	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
+	struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+	void *lookup_mem = ws->lookup_mem;
+	uintptr_t lbase = ws->lmt_base;
 	struct rte_event_vector *vec;
-	uint64_t aura_handle, laddr;
+	uint64_t meta_aura, laddr;
 	uint16_t nb_mbufs, non_vec;
 	uint16_t lmt_id, d_off;
 	struct rte_mbuf **wqe;
@@ -153,25 +155,31 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 	if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
 		mbuf_init |= 8;
 
+	meta_aura = ws->meta_aura;
 	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
 	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
-					      flags | NIX_RX_VWQE_F, lookup_mem,
-					      tstamp, lbase);
+					      flags | NIX_RX_VWQE_F,
+					      lookup_mem, tstamp,
+					      lbase, meta_aura);
 	wqe += nb_mbufs;
 	non_vec = vec->nb_elem - nb_mbufs;
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+		uint64_t sg_w1;
+
 		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
 					   sizeof(struct rte_mbuf));
 		/* Pick first mbuf's aura handle assuming all
 		 * mbufs are from a vec and are from same RQ.
 		 */
-		aura_handle = mbuf->pool->pool_id;
+		meta_aura = ws->meta_aura;
+		if (!meta_aura)
+			meta_aura = mbuf->pool->pool_id;
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 		laddr = lbase;
 		laddr += 8;
-		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
-		d_off += (mbuf_init & 0xFFFF);
+		sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+		d_off = sg_w1 - (uintptr_t)mbuf;
 		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	}
@@ -208,7 +216,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
 
 	/* Free remaining meta buffers if any */
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
-		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
+		nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
 		plt_io_wmb();
 	}
 }
@@ -241,8 +249,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 			uint64_t cq_w5;
 
 			m = (struct rte_mbuf *)mbuf;
-			d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
-			d_off += RTE_PKTMBUF_HEADROOM;
+			d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
 
 			cq_w1 = *(uint64_t *)(u64[1] + 8);
 			cq_w5 = *(uint64_t *)(u64[1] + 40);
@@ -273,8 +280,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
 			   ((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
 		*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
-		cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
-				   ws->tstamp[port], ws->lmt_base);
+		cn10k_process_vwqe(u64[1], port, flags, ws);
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index fae4484758..d61e60dd2d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -148,6 +148,7 @@ struct cn10k_sso_hws {
 	uint8_t hws_id;
 	/* PTP timestamp */
 	struct cnxk_timesync_info **tstamp;
+	uint64_t meta_aura;
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 7937cadd25..5f51c504b5 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -194,7 +194,7 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
 
 	rq->vwqe_ena = 1;
 	rq->vwqe_first_skip = 0;
-	rq->vwqe_aura_handle = roc_npa_aura_handle_to_aura(vmp->pool_id);
+	rq->vwqe_aura_handle = vmp->pool_id;
 	rq->vwqe_max_sz_exp = rte_log2_u32(sz);
 	rq->vwqe_wait_tmo =
 		tmo_ns /
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 80c5c0e962..e8faeebe1f 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -282,9 +282,13 @@ cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		rxq->lmt_base = dev->nix.lmt_base;
 		rxq->sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix,
 							   dev->inb.inl_dev);
+		rxq->meta_aura = rq->meta_aura_handle;
+		rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+		/* Assume meta packet from normal aura if meta aura is not setup
+		 */
+		if (!rxq->meta_aura)
+			rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
 	}
-	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
-	rxq->aura_handle = rxq_sp->qconf.mp->pool_id;
 
 	/* Lookup mem */
 	rxq->lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index acfdbb66aa..d0a5b136e3 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -39,7 +39,7 @@ struct cn10k_eth_rxq {
 	uint16_t data_off;
 	uint64_t sa_base;
 	uint64_t lmt_base;
-	uint64_t aura_handle;
+	uint64_t meta_aura;
 	uint16_t rq;
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 0f8790b8c7..2cd297eb82 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -877,7 +877,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		aura_handle = rxq->aura_handle;
+		aura_handle = rxq->meta_aura;
 		sa_base = rxq->sa_base;
 		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -984,7 +984,7 @@ static __rte_always_inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	struct cn10k_eth_rxq *rxq = args;
 	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
@@ -1003,10 +1003,10 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
 	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
-	uint64_t aura_handle, lbase, laddr;
 	uint8_t loff = 0, lnum = 0, shft = 0;
 	uint8x16_t f0, f1, f2, f3;
 	uint16_t lmt_id, d_off;
+	uint64_t lbase, laddr;
 	uint16_t packets = 0;
 	uint16_t pkts_left;
 	uintptr_t sa_base;
@@ -1035,6 +1035,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
 		if (flags & NIX_RX_VWQE_F) {
+			uint64_t sg_w1;
 			uint16_t port;
 
 			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
@@ -1042,10 +1043,15 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			/* Pick first mbuf's aura handle assuming all
 			 * mbufs are from a vec and are from same RQ.
 			 */
-			aura_handle = mbuf0->pool->pool_id;
+			if (!meta_aura)
+				meta_aura = mbuf0->pool->pool_id;
 			/* Calculate offset from mbuf to actual data area */
-			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
-			d_off += (mbuf_initializer & 0xFFFF);
+			/* Zero aura's first skip i.e mbuf setup might not match the actual
+			 * offset as first skip is taken from second pass RQ. So compute
+			 * using diff b/w first SG pointer and mbuf addr.
+			 */
+			sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+			d_off = (sg_w1 - (uint64_t)mbuf0);
 
 			/* Get SA Base from lookup tbl using port_id */
 			port = mbuf_initializer >> 48;
@@ -1053,7 +1059,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 			lbase = lmt_base;
 		} else {
-			aura_handle = rxq->aura_handle;
+			meta_aura = rxq->meta_aura;
 			d_off = rxq->data_off;
 			sa_base = rxq->sa_base;
 			lbase = rxq->lmt_base;
@@ -1721,7 +1727,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(15 & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 				loff = loff - 15;
 				shft += 3;
 
@@ -1744,14 +1750,14 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 				/* Update aura handle */
 				*(uint64_t *)(laddr - 8) =
 					(((uint64_t)(loff & 0x1) << 32) |
-				    roc_npa_aura_handle_to_aura(aura_handle));
+				    roc_npa_aura_handle_to_aura(meta_aura));
 
 				data = (data & ~(0x7UL << shft)) |
 				       (((uint64_t)loff >> 1) << shft);
 
 				/* Send up to 16 lmt lines of pointers */
 				nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
-							 aura_handle);
+							 meta_aura);
 				rte_io_wmb();
 				lnum = 0;
 				loff = 0;
@@ -1769,13 +1775,13 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 		/* Update aura handle */
 		*(uint64_t *)(laddr - 8) =
 			(((uint64_t)(loff & 0x1) << 32) |
-			 roc_npa_aura_handle_to_aura(aura_handle));
+			 roc_npa_aura_handle_to_aura(meta_aura));
 
 		data = (data & ~(0x7UL << shft)) |
 		       (((uint64_t)loff >> 1) << shft);
 
 		/* Send up to 16 lmt lines of pointers */
-		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
+		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
 		if (flags & NIX_RX_VWQE_F)
 			plt_io_wmb();
 	}
@@ -1803,7 +1809,7 @@ static inline uint16_t
 cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			   const uint16_t flags, void *lookup_mem,
 			   struct cnxk_timesync_info *tstamp,
-			   uintptr_t lmt_base)
+			   uintptr_t lmt_base, uint64_t meta_aura)
 {
 	RTE_SET_USED(args);
 	RTE_SET_USED(mbufs);
@@ -1812,6 +1818,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 	RTE_SET_USED(lookup_mem);
 	RTE_SET_USED(tstamp);
 	RTE_SET_USED(lmt_base);
+	RTE_SET_USED(meta_aura);
 
 	return 0;
 }
@@ -2038,7 +2045,7 @@ NIX_RX_FASTPATH_MODES
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
 	{                                                                      \
 		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
-						  (flags), NULL, NULL, 0);     \
+						  (flags), NULL, NULL, 0, 0);  \
 	}
 
 #define NIX_RX_RECV_VEC_MSEG(fn, flags)                                        \
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 80ab3cfedd..85ad70e50b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1732,6 +1732,9 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	roc_nix_mac_link_info_get_cb_register(nix,
 					      cnxk_eth_dev_link_status_get_cb);
 
+	/* Register callback for inline meta pool create */
+	roc_nix_inl_meta_pool_cb_register(cnxk_nix_inl_meta_pool_cb);
+
 	dev->eth_dev = eth_dev;
 	dev->configured = 0;
 	dev->ptype_disable = 0;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index f11a9a0b63..a4178cfeff 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -642,6 +642,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
 struct cnxk_eth_sec_sess *
 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
 			      struct rte_security_session *sess);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
+			      bool destroy);
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 1de3454398..9304b1465d 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -4,10 +4,14 @@
 
 #include <cnxk_ethdev.h>
 
+#define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
+
 #define CNXK_NIX_INL_SELFTEST	      "selftest"
 #define CNXK_NIX_INL_IPSEC_IN_MIN_SPI "ipsec_in_min_spi"
 #define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
 #define CNXK_INL_CPT_CHANNEL	      "inl_cpt_channel"
+#define CNXK_NIX_INL_NB_META_BUFS     "nb_meta_bufs"
+#define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 
 struct inl_cpt_channel {
 	bool is_multi_channel;
@@ -28,6 +32,85 @@ bitmap_ctzll(uint64_t slab)
 	return __builtin_ctzll(slab);
 }
 
+int
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+{
+	const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+	struct rte_pktmbuf_pool_private mbp_priv;
+	struct npa_aura_s *aura;
+	struct rte_mempool *mp;
+	uint16_t first_skip;
+	int rc;
+
+	/* Destroy the mempool if requested */
+	if (destroy) {
+		mp = rte_mempool_lookup(mp_name);
+		if (!mp)
+			return -ENOENT;
+
+		if (mp->pool_id != *aura_handle) {
+			plt_err("Meta pool aura mismatch");
+			return -EINVAL;
+		}
+
+		plt_free(mp->pool_config);
+		rte_mempool_free(mp);
+
+		*aura_handle = 0;
+		return 0;
+	}
+
+	/* Need to make it similar to rte_pktmbuf_pool() for sake of OOP
+	 * support.
+	 */
+	mp = rte_mempool_create_empty(mp_name, nb_bufs, buf_sz, 0,
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      SOCKET_ID_ANY, 0);
+	if (!mp) {
+		plt_err("Failed to create inline meta pool");
+		return -EIO;
+	}
+
+	/* Indicate to allocate zero aura */
+	aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
+	if (!aura) {
+		rc = -ENOMEM;
+		goto free_mp;
+	}
+	aura->ena = 1;
+	aura->pool_addr = 0x0;
+
+	rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
+					aura);
+	if (rc) {
+		plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
+		goto free_aura;
+	}
+
+	/* Init mempool private area */
+	first_skip = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+	memset(&mbp_priv, 0, sizeof(mbp_priv));
+	mbp_priv.mbuf_data_room_size = (buf_sz - first_skip +
+					RTE_PKTMBUF_HEADROOM);
+	rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+	/* Populate buffer */
+	rc = rte_mempool_populate_default(mp);
+	if (rc < 0) {
+		plt_err("Failed to create inline meta pool, rc=%d", rc);
+		goto free_aura;
+	}
+
+	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
+	*aura_handle = mp->pool_id;
+	return 0;
+free_aura:
+	plt_free(aura);
+free_mp:
+	rte_mempool_free(mp);
+	return rc;
+}
+
 int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
@@ -128,7 +211,7 @@ struct rte_security_ops cnxk_eth_sec_ops = {
 };
 
 static int
-parse_ipsec_in_spi_range(const char *key, const char *value, void *extra_args)
+parse_val_u32(const char *key, const char *value, void *extra_args)
 {
 	RTE_SET_USED(key);
 	uint32_t val;
@@ -184,6 +267,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	uint32_t ipsec_in_min_spi = 0;
 	struct inl_cpt_channel cpt_channel;
 	struct rte_kvargs *kvlist;
+	uint32_t nb_meta_bufs = 0;
+	uint32_t meta_buf_sz = 0;
 	uint8_t selftest = 0;
 
 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -198,11 +283,15 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
 			   &selftest);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MIN_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_min_spi);
+			   &parse_val_u32, &ipsec_in_min_spi);
 	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
-			   &parse_ipsec_in_spi_range, &ipsec_in_max_spi);
+			   &parse_val_u32, &ipsec_in_max_spi);
 	rte_kvargs_process(kvlist, CNXK_INL_CPT_CHANNEL, &parse_inl_cpt_channel,
 			   &cpt_channel);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_NB_META_BUFS, &parse_val_u32,
+			   &nb_meta_bufs);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_META_BUF_SZ, &parse_val_u32,
+			   &meta_buf_sz);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -212,6 +301,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->channel = cpt_channel.channel;
 	inl_dev->chan_mask = cpt_channel.mask;
 	inl_dev->is_multi_channel = cpt_channel.is_multi_channel;
+	inl_dev->nb_meta_bufs = nb_meta_bufs;
+	inl_dev->meta_buf_sz = meta_buf_sz;
 	return 0;
 exit:
 	return -EINVAL;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
                     ` (17 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Replace the use of platform specific APIs with platform independent
APIs.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/roc_cpt.c    | 8 ++++----
 drivers/common/cnxk/roc_cpt.h    | 2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index d607bde3c4..6f0ee44b54 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -998,7 +998,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1019,9 +1019,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
 	inst.w4.s.dlen = ctx_len;
-	inst.dptr = rte_mempool_virt2iova(sa);
+	inst.dptr = sa;
 	inst.rptr = 0;
-	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+	inst.w7.s.cptr = sa;
 	inst.w7.s.egrp = egrp;
 
 	inst.w0.u64 = 0;
@@ -1029,7 +1029,7 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
 	inst.w3.u64 = 0;
 	inst.res_addr = (uintptr_t)hw_res;
 
-	rte_io_wmb();
+	plt_io_wmb();
 
 	do {
 		/* Copy CPT command to LMTLINE */
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 4e3a078a90..6953f2bdd3 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -173,7 +173,7 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa,
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
 				   uint8_t opcode, uint16_t ctx_len,
 				   uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 6d26b0cc01..78c181b4a4 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -82,8 +82,8 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->out_sa, opcode,
-				   ctx_len, egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
+				   opcode, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -174,8 +174,8 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->in_sa, opcode, ctx_len,
-				   egrp);
+	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
+				   opcode, ctx_len, egrp);
 	if (ret)
 		return ret;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (12 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
                     ` (16 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj, Pavan Nikhilesh,
	Shijith Thotton
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Use the Full context SA structures and command in IPsec fast path.
For inline outbound, populate CPT instruction as per Full context.
Add new macros and functions with respect to Full context.
Populate wqe ptr in CPT instruction with proper offset from mbuf.
Add option to override outbound inline sa iv for debug
purposes via environment variable. User can set env variable as:
export ETH_SEC_IV_OVR="0x0, 0x0,..."
Update mbuf len based on IP version in rx post process.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c          |   8 +-
 drivers/common/cnxk/roc_cpt.c                |   9 +-
 drivers/common/cnxk/roc_cpt.h                |   8 +-
 drivers/common/cnxk/roc_ie_on.h              |   6 +
 drivers/common/cnxk/roc_nix_inl.c            |  33 ++++--
 drivers/common/cnxk/roc_nix_inl.h            |  46 ++++++++
 drivers/common/cnxk/roc_nix_inl_dev.c        |   2 +-
 drivers/crypto/cnxk/cn9k_ipsec.c             |   8 +-
 drivers/event/cnxk/cn9k_worker.h             |  48 +++++---
 drivers/net/cnxk/cn9k_ethdev.h               |   3 +
 drivers/net/cnxk/cn9k_ethdev_sec.c           | 110 +++++++++++++++----
 drivers/net/cnxk/cn9k_rx.h                   |  43 +++++---
 drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c |  32 ++----
 13 files changed, 254 insertions(+), 102 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index dca8742be3..89ac900d90 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1242,7 +1242,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 			ctx_len += sizeof(template->ip4);
 
 			ip4->version_ihl = RTE_IPV4_VHL_DEF;
-			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
+			ip4->time_to_live = ipsec->tunnel.ipv4.ttl ?
+						    ipsec->tunnel.ipv4.ttl :
+						    0x40;
 			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
 			if (ipsec->tunnel.ipv4.df)
 				frag_off |= RTE_IPV4_HDR_DF_FLAG;
@@ -1275,7 +1277,9 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 						 ((ipsec->tunnel.ipv6.flabel
 						   << RTE_IPV6_HDR_FL_SHIFT) &
 						  RTE_IPV6_HDR_FL_MASK));
-			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
+			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit ?
+						  ipsec->tunnel.ipv6.hlimit :
+						  0x40;
 			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
 			       sizeof(struct in6_addr));
 			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 6f0ee44b54..8fc072b9d0 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -277,7 +277,7 @@ roc_cpt_inline_ipsec_inb_cfg_read(struct roc_cpt *roc_cpt,
 
 int
 roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
-			     uint16_t param2)
+			     uint16_t param2, uint16_t opcode)
 {
 	struct cpt *cpt = roc_cpt_to_cpt_priv(roc_cpt);
 	struct cpt_rx_inline_lf_cfg_msg *req;
@@ -292,6 +292,7 @@ roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt, uint16_t param1,
 	req->sso_pf_func = idev_sso_pffunc_get();
 	req->param1 = param1;
 	req->param2 = param2;
+	req->opcode = opcode;
 
 	return mbox_process(mbox);
 }
@@ -998,7 +999,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 }
 
 int
-roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
 		     uint16_t ctx_len, uint8_t egrp)
 {
 	union cpt_res_s res, *hw_res;
@@ -1014,7 +1015,9 @@ roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, uint8_t opcode,
 
 	hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
 
-	inst.w4.s.opcode_major = opcode;
+	inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
+	if (inb)
+		inst.w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	inst.w4.s.opcode_minor = ctx_len >> 3;
 	inst.w4.s.param1 = 0;
 	inst.w4.s.param2 = 0;
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 6953f2bdd3..9a79998705 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -161,7 +161,8 @@ int __roc_api roc_cpt_inline_ipsec_cfg(struct dev *dev, uint8_t slot,
 int __roc_api roc_cpt_inline_ipsec_inb_cfg_read(
 	struct roc_cpt *roc_cpt, struct nix_inline_ipsec_cfg *inb_cfg);
 int __roc_api roc_cpt_inline_ipsec_inb_cfg(struct roc_cpt *roc_cpt,
-					   uint16_t param1, uint16_t param2);
+					   uint16_t param1, uint16_t param2,
+					   uint16_t opcode);
 int __roc_api roc_cpt_afs_print(struct roc_cpt *roc_cpt);
 int __roc_api roc_cpt_lfs_print(struct roc_cpt *roc_cpt);
 void __roc_api roc_cpt_iq_disable(struct roc_cpt_lf *lf);
@@ -173,7 +174,6 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
-int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa,
-				   uint8_t opcode, uint16_t ctx_len,
-				   uint8_t egrp);
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, uint64_t sa, bool inb,
+				   uint16_t ctx_len, uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 2d93cb609c..961d5fc95e 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -13,6 +13,12 @@
 #define ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC 0x23
 #define ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC  0x24
 
+#define ROC_IE_ON_INB_MAX_CTX_LEN	       34UL
+#define ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT  (1 << 12)
+#define ROC_IE_ON_OUTB_MAX_CTX_LEN	       31UL
+#define ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT (1 << 9)
+#define ROC_IE_ON_OUTB_PER_PKT_IV	       (1 << 11)
+
 /* Ucode completion codes */
 enum roc_ie_on_ucc_ipsec {
 	ROC_IE_ON_UCC_SUCCESS = 0,
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 507a15315a..be0b8066c7 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -8,11 +8,11 @@
 uint32_t soft_exp_consumer_cnt;
 roc_nix_inl_meta_pool_cb_t meta_pool_cb;
 
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ == 512);
-PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ ==
-		  1UL << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_INB_SA_SZ == 1024);
+PLT_STATIC_ASSERT(ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ ==
+		  1UL << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ ==
 		  1UL << ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
 PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_INB_SA_SZ == 1024);
@@ -184,7 +184,7 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
@@ -422,7 +422,9 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct idev_cfg *idev = idev_get_cfg();
 	struct roc_cpt *roc_cpt;
+	uint16_t opcode;
 	uint16_t param1;
+	uint16_t param2;
 	int rc;
 
 	if (idev == NULL)
@@ -439,17 +441,23 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	}
 
 	if (roc_model_is_cn9k()) {
-		param1 = ROC_ONF_IPSEC_INB_MAX_L2_SZ;
+		param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
+		param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
+		opcode =
+			((ROC_IE_ON_INB_MAX_CTX_LEN << 8) |
+			 (ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6)));
 	} else {
 		union roc_ot_ipsec_inb_param1 u;
 
 		u.u16 = 0;
 		u.s.esp_trailer_disable = 1;
 		param1 = u.u16;
+		param2 = 0;
+		opcode = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC | (1 << 6));
 	}
 
 	/* Do onetime Inbound Inline config in CPTPF */
-	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, 0);
+	rc = roc_cpt_inline_ipsec_inb_cfg(roc_cpt, param1, param2, opcode);
 	if (rc && rc != -EEXIST) {
 		plt_err("Failed to setup inbound lf, rc=%d", rc);
 		return rc;
@@ -605,7 +613,7 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 
 	/* CN9K SA size is different */
 	if (roc_model_is_cn9k())
-		sa_sz = ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ;
+		sa_sz = ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ;
 	else
 		sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
 	/* Alloc contiguous memory of outbound SA */
@@ -1212,7 +1220,12 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
 
 	/* Nothing much to do on cn9k */
 	if (roc_model_is_cn9k()) {
-		plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+		nix = roc_nix_to_nix_priv(roc_nix);
+		outb_lf = nix->cpt_lf_base;
+		rc = roc_on_cpt_ctx_write(outb_lf, (uint64_t)sa_dptr, inb,
+					  sa_len, ROC_CPT_DFLT_ENG_GRP_SE_IE);
+		if (rc)
+			return rc;
 		return 0;
 	}
 
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 9911a48b2d..555cb28c1a 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -22,6 +22,24 @@
 	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
 #define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
 
+/* ON INB HW area */
+#define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
+	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
+/* ON INB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD 640
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ                                         \
+	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
+
+/* ON OUTB HW area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
+	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
+/* ON OUTB SW reserved area */
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
+	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2 9
+
 /* OT INB HW area */
 #define ROC_NIX_INL_OT_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ot_ipsec_inb_sa), ROC_ALIGN)
@@ -61,6 +79,34 @@
 #define ROC_NIX_INL_REAS_ZOMBIE_LIMIT	  0xFFF
 #define ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD 10
 
+static inline struct roc_ie_on_inb_sa *
+roc_nix_inl_on_ipsec_inb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline struct roc_ie_on_outb_sa *
+roc_nix_inl_on_ipsec_outb_sa(uintptr_t base, uint64_t idx)
+{
+	uint64_t off = idx << ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ_LOG2;
+
+	return PLT_PTR_ADD(base, off);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_INB_HW_SZ);
+}
+
+static inline void *
+roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
+{
+	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
+}
+
 static inline struct roc_onf_ipsec_inb_sa *
 roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
 {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 1e9b2b95d7..4fe7b5180b 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -394,7 +394,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 
 	/* CN9K SA is different */
 	if (roc_model_is_cn9k())
-		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+		inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
 	else
 		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 78c181b4a4..84915581fc 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -29,7 +29,6 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret;
 
@@ -80,10 +79,9 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 		return ret;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->out_sa),
-				   opcode, ctx_len, egrp);
+				   false, ctx_len, egrp);
 
 	if (ret)
 		return ret;
@@ -133,7 +131,6 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
 	size_t ctx_len = 0;
-	uint8_t opcode;
 	uint8_t egrp;
 	int ret = 0;
 
@@ -172,10 +169,9 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 		sa->esn_en = 1;
 
 	ctx_len = ret;
-	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
 	ret = roc_on_cpt_ctx_write(&qp->lf, rte_mempool_virt2iova(&sa->in_sa),
-				   opcode, ctx_len, egrp);
+				   true, ctx_len, egrp);
 	if (ret)
 		return ret;
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index d86cb94a77..384b428ed1 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -617,12 +617,14 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t sa_base = txq->sa_base;
 	uint32_t pkt_len, dlen_adj, rlen;
+	struct roc_ie_on_outb_hdr *hdr;
 	uint64x2_t cmd01, cmd23;
 	uint64_t lmt_status, sa;
 	union nix_send_sg_s *sg;
+	uint32_t esn_lo, esn_hi;
 	uintptr_t dptr, nixtx;
 	uint64_t ucode_cmd[4];
-	uint64_t esn, *iv;
+	uint64_t esn;
 	uint8_t l2_len;
 
 	mdata.u64 = *rte_security_dynfield(m);
@@ -661,14 +663,19 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 
 	/* Load opcode and cptr already prepared at pkt metadata set */
 	pkt_len -= l2_len;
-	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
-		    ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
+	pkt_len += (sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN) +
+		   ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 
-	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
+	sa = (uintptr_t)roc_nix_inl_on_ipsec_outb_sa(sa_base, mdata.sa_idx);
 	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
-	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
-			0x40UL << 48 | pkt_len);
+	ucode_cmd[0] = (((ROC_IE_ON_OUTB_MAX_CTX_LEN << 8) |
+			 ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC)
+				<< 48 |
+			(ROC_IE_ON_OUTB_IKEV2_SINGLE_SA_SUPPORT |
+			 (ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ >>
+			  3)) << 32 |
+			pkt_len);
 
 	/* CPT Word 0 and Word 1 */
 	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
@@ -678,35 +685,40 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
 	/* CPT word 2 and 3 */
 	cmd23 = vdupq_n_u64(0);
 	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
-				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
-	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);
+				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20),
+			       cmd23, 0);
+	cmd23 = vsetq_lane_u64(((uintptr_t)m + sizeof(struct rte_mbuf)) | 1,
+			       cmd23, 1);
 
 	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
-		sizeof(struct roc_onf_ipsec_outb_hdr);
+		(sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN);
 	ucode_cmd[1] = dptr;
 	ucode_cmd[2] = dptr;
 
-	/* Update IV to zero and l2 sz */
-	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
+	/* Update l2 sz */
+	*(uint16_t *)(dptr + (sizeof(struct roc_ie_on_outb_hdr) -
+			      ROC_IE_ON_MAX_IV_LEN)) =
 		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
-	iv = (uint64_t *)(dptr + 8);
-	iv[0] = 0;
-	iv[1] = 0;
 
 	/* Head wait if needed */
 	if (base)
 		roc_sso_hws_head_wait(base);
 
 	/* ESN */
-	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
+	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd((void *)sa);
 	esn = outb_priv->esn;
 	outb_priv->esn = esn + 1;
 
 	ucode_cmd[0] |= (esn >> 32) << 16;
-	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_lo = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	esn_hi = rte_cpu_to_be_32(esn >> 32);
 
-	/* Update ESN and IPID and IV */
-	*(uint64_t *)dptr = esn << 32 | esn;
+	/* Update ESN, IPID and IV */
+	hdr = (struct roc_ie_on_outb_hdr *)dptr;
+	hdr->ip_id = esn_lo;
+	hdr->seq = esn_lo;
+	hdr->esn = esn_hi;
+	hdr->df_tos = 0;
 
 	rte_io_wmb();
 	cn9k_sso_txq_fc_wait(txq);
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 449729f0c5..472a4b06da 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -79,6 +79,9 @@ struct cn9k_outb_priv_data {
 
 	/* Back pointer to eth sec session */
 	struct cnxk_eth_sec_sess *eth_sec;
+
+	/* IV in DBG mode */
+	uint8_t iv_dbg[ROC_IE_ON_MAX_IV_LEN];
 };
 
 struct cn9k_sec_sess_priv {
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 4dd0b6185e..2b2dca8b51 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -134,6 +134,37 @@ ar_window_init(struct cn9k_inb_priv_data *inb_priv)
 	return 0;
 }
 
+static void
+outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
+{
+	uint8_t *iv_dbg = common_sa->iv.aes_iv;
+	char *iv_str = strdup(__iv_str);
+	char *iv_b = NULL;
+	char *save;
+	int i, iv_len = ROC_IE_ON_MAX_IV_LEN;
+
+	if (!iv_str)
+		return;
+
+	if (common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
+	    common_sa->ctl.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
+	    common_sa->ctl.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
+		iv_dbg = common_sa->iv.gcm.iv;
+		iv_len = 8;
+	}
+
+	memset(iv_dbg, 0, iv_len);
+	for (i = 0; i < iv_len; i++) {
+		iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+		if (!iv_b)
+			break;
+		iv_dbg[i] = strtoul(iv_b, NULL, 0);
+	}
+
+	free(iv_str);
+}
+
 static int
 cn9k_eth_sec_session_create(void *device,
 			    struct rte_security_session_conf *conf,
@@ -150,6 +181,7 @@ cn9k_eth_sec_session_create(void *device,
 	rte_spinlock_t *lock;
 	char tbuf[128] = {0};
 	bool inbound;
+	int ctx_len;
 	int rc = 0;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
@@ -183,21 +215,25 @@ cn9k_eth_sec_session_create(void *device,
 	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
 	sess_priv.u64 = 0;
 
+	if (!dev->outb.lf_base) {
+		plt_err("Could not allocate security session private data");
+		return -ENOMEM;
+	}
+
 	if (inbound) {
 		struct cn9k_inb_priv_data *inb_priv;
-		struct roc_onf_ipsec_inb_sa *inb_sa;
+		struct roc_ie_on_inb_sa *inb_sa;
 		uint32_t spi_mask;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD);
 
 		spi_mask = roc_nix_inl_inb_spi_range(nix, false, NULL, NULL);
 
 		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE. Assume no inline
 		 * device always for CN9K.
 		 */
-		inb_sa = (struct roc_onf_ipsec_inb_sa *)
-			 roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
+		inb_sa = (struct roc_ie_on_inb_sa *)roc_nix_inl_inb_sa_get(nix, false, ipsec->spi);
 		if (!inb_sa) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to create ingress sa");
@@ -206,7 +242,7 @@ cn9k_eth_sec_session_create(void *device,
 		}
 
 		/* Check if SA is already in use */
-		if (inb_sa->ctl.valid) {
+		if (inb_sa->common_sa.ctl.valid) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Inbound SA with SPI %u already in use",
 				 ipsec->spi);
@@ -214,17 +250,26 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
+		memset(inb_sa, 0, sizeof(struct roc_ie_on_inb_sa));
 
 		/* Fill inbound sa params */
-		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
-		if (rc) {
+		rc = cnxk_on_ipsec_inb_sa_create(ipsec, crypto, inb_sa);
+		if (rc < 0) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init inbound sa, rc=%d", rc);
 			goto mempool_put;
 		}
 
-		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa, inb_sa, inbound,
+					   ctx_len);
+		if (rc) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to create inbound sa, rc=%d", rc);
+			goto mempool_put;
+		}
+
+		inb_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(inb_sa);
 		/* Back pointer to get eth_sec */
 		inb_priv->eth_sec = eth_sec;
 
@@ -253,27 +298,38 @@ cn9k_eth_sec_session_create(void *device,
 		dev->inb.nb_sess++;
 	} else {
 		struct cn9k_outb_priv_data *outb_priv;
-		struct roc_onf_ipsec_outb_sa *outb_sa;
 		uintptr_t sa_base = dev->outb.sa_base;
 		struct cnxk_ipsec_outb_rlens *rlens;
+		struct roc_ie_on_outb_sa *outb_sa;
+		const char *iv_str;
 		uint32_t sa_idx;
 
 		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
-				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
+				  ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD);
 
 		/* Alloc an sa index */
 		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, 0);
 		if (rc)
 			goto mempool_put;
 
-		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
-		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
+		outb_sa = roc_nix_inl_on_ipsec_outb_sa(sa_base, sa_idx);
+		outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
 		rlens = &outb_priv->rlens;
 
-		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
+		memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));
 
 		/* Fill outbound sa params */
-		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
+		rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
+		if (rc < 0) {
+			snprintf(tbuf, sizeof(tbuf),
+				 "Failed to init outbound sa, rc=%d", rc);
+			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+			goto mempool_put;
+		}
+
+		ctx_len = rc;
+		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, inbound,
+					   ctx_len);
 		if (rc) {
 			snprintf(tbuf, sizeof(tbuf),
 				 "Failed to init outbound sa, rc=%d", rc);
@@ -281,6 +337,18 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
+		/* Always enable explicit IV.
+		 * Copy the IV from application only when iv_gen_disable flag is
+		 * set
+		 */
+		outb_sa->common_sa.ctl.explicit_iv_en = 1;
+
+		if (conf->ipsec.options.iv_gen_disable == 1) {
+			iv_str = getenv("ETH_SEC_IV_OVR");
+			if (iv_str)
+				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
+		}
+
 		/* Save userdata */
 		outb_priv->userdata = conf->userdata;
 		outb_priv->sa_idx = sa_idx;
@@ -288,8 +356,8 @@ cn9k_eth_sec_session_create(void *device,
 		/* Start sequence number with 1 */
 		outb_priv->seq = 1;
 
-		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
-		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+		memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
+		if (outb_sa->common_sa.ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
 			outb_priv->copy_salt = 1;
 
 		/* Save rlen info */
@@ -337,9 +405,9 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
-	struct roc_onf_ipsec_outb_sa *outb_sa;
-	struct roc_onf_ipsec_inb_sa *inb_sa;
 	struct cnxk_eth_sec_sess *eth_sec;
+	struct roc_ie_on_outb_sa *outb_sa;
+	struct roc_ie_on_inb_sa *inb_sa;
 	struct rte_mempool *mp;
 	rte_spinlock_t *lock;
 
@@ -353,14 +421,14 @@ cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
 	if (eth_sec->inb) {
 		inb_sa = eth_sec->sa;
 		/* Disable SA */
-		inb_sa->ctl.valid = 0;
+		inb_sa->common_sa.ctl.valid = 0;
 
 		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
 		dev->inb.nb_sess--;
 	} else {
 		outb_sa = eth_sec->sa;
 		/* Disable SA */
-		outb_sa->ctl.valid = 0;
+		outb_sa->common_sa.ctl.valid = 0;
 
 		/* Release Outbound SA index */
 		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index 25a4927a33..1a9f920b41 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -171,7 +171,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 }
 
 static inline int
-ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
+ipsec_antireplay_check(struct roc_ie_on_inb_sa *sa,
 		       struct cn9k_inb_priv_data *priv, uintptr_t data,
 		       uint32_t win_sz)
 {
@@ -183,7 +183,7 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	uint8_t esn;
 	int rc;
 
-	esn = sa->ctl.esn_en;
+	esn = sa->common_sa.ctl.esn_en;
 	seql = rte_be_to_cpu_32(*((uint32_t *)(data + IPSEC_SQ_LO_IDX)));
 
 	if (!esn) {
@@ -200,11 +200,12 @@ ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
 	rte_spinlock_lock(&ar->lock);
 	rc = cnxk_on_anti_replay_check(seq, ar, win_sz);
 	if (esn && !rc) {
-		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->esn_hi) << 32) |
-			    rte_be_to_cpu_32(sa->esn_low);
+		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->common_sa.seq_t.th)
+			     << 32) |
+			    rte_be_to_cpu_32(sa->common_sa.seq_t.tl);
 		if (seq > seq_in_sa) {
-			sa->esn_low = rte_cpu_to_be_32(seql);
-			sa->esn_hi = rte_cpu_to_be_32(seqh);
+			sa->common_sa.seq_t.tl = rte_cpu_to_be_32(seql);
+			sa->common_sa.seq_t.th = rte_cpu_to_be_32(seqh);
 		}
 	}
 	rte_spinlock_unlock(&ar->lock);
@@ -266,9 +267,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
 	struct cn9k_inb_priv_data *sa_priv;
-	struct roc_onf_ipsec_inb_sa *sa;
+	struct roc_ie_on_inb_sa *sa;
 	uint8_t lcptr = rx->lcptr;
-	struct rte_ipv4_hdr *ipv4;
+	struct rte_ipv4_hdr *ip;
+	struct rte_ipv6_hdr *ip6;
 	uint16_t data_off, res;
 	uint32_t spi, win_sz;
 	uint32_t spi_mask;
@@ -279,6 +281,7 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	res = *(uint64_t *)(res_sg0 + 8);
 	data_off = *rearm_val & (BIT_ULL(16) - 1);
 	data = (uintptr_t)m->buf_addr;
+
 	data += data_off;
 
 	rte_prefetch0((void *)data);
@@ -294,10 +297,10 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	sa_w = sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 	spi_mask = (1ULL << sa_w) - 1;
-	sa = roc_nix_inl_onf_ipsec_inb_sa(sa_base, spi & spi_mask);
+	sa = roc_nix_inl_on_ipsec_inb_sa(sa_base, spi & spi_mask);
 
 	/* Update dynamic field with userdata */
-	sa_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(sa);
+	sa_priv = roc_nix_inl_on_ipsec_inb_sa_sw_rsvd(sa);
 	dw = *(__uint128_t *)sa_priv;
 	*rte_security_dynfield(m) = (uint64_t)dw;
 
@@ -309,16 +312,26 @@ nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
 	}
 
 	/* Get total length from IPv4 header. We can assume only IPv4 */
-	ipv4 = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-				       ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	ip = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
+				     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+
+	if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
+	    IPVERSION) {
+		*len = rte_be_to_cpu_16(ip->total_length) + lcptr;
+	} else {
+		PLT_ASSERT(((ip->version_ihl & 0xf0) >>
+			    RTE_IPV4_IHL_MULTIPLIER) == 6);
+		ip6 = (struct rte_ipv6_hdr *)ip;
+		*len = rte_be_to_cpu_16(ip6->payload_len) +
+		       sizeof(struct rte_ipv6_hdr) + lcptr;
+	}
 
 	/* Update data offset */
-	data_off += (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
-		     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
+	data_off +=
+		(ROC_ONF_IPSEC_INB_SPI_SEQ_SZ + ROC_ONF_IPSEC_INB_MAX_L2_SZ);
 	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
 	*rearm_val |= data_off;
 
-	*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
 	return RTE_MBUF_F_RX_SEC_OFFLOAD;
 }
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
index bfdbd1ee5d..dd8b7a525c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec_telemetry.c
@@ -14,59 +14,47 @@
 static int
 copy_outb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_outb_sa *out_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_outb_sa *out_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	out_sa = (struct roc_onf_ipsec_outb_sa *)sa;
-	w0.ctl = out_sa->ctl;
+	out_sa = (struct roc_ie_on_outb_sa *)sa;
+	w0.ctl = out_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "outsa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
-	snprintf(str, sizeof(str), "outsa_src_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_src);
-
-	snprintf(str, sizeof(str), "outsa_dst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->udp_dst);
-
-	snprintf(str, sizeof(str), "outsa_isrc_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_src);
-
-	snprintf(str, sizeof(str), "outsa_idst_%u", i);
-	rte_tel_data_add_dict_u64(d, str, out_sa->ip_dst);
-
 	return 0;
 }
 
 static int
 copy_inb_sa_9k(struct rte_tel_data *d, uint32_t i, void *sa)
 {
-	struct roc_onf_ipsec_inb_sa *in_sa;
 	union {
-		struct roc_ie_onf_sa_ctl ctl;
+		struct roc_ie_on_sa_ctl ctl;
 		uint64_t u64;
 	} w0;
+	struct roc_ie_on_inb_sa *in_sa;
 	char strw0[W0_MAXLEN];
 	char str[STR_MAXLEN];
 
-	in_sa = (struct roc_onf_ipsec_inb_sa *)sa;
-	w0.ctl = in_sa->ctl;
+	in_sa = (struct roc_ie_on_inb_sa *)sa;
+	w0.ctl = in_sa->common_sa.ctl;
 
 	snprintf(str, sizeof(str), "insa_w0_%u", i);
 	snprintf(strw0, sizeof(strw0), "%" PRIu64, w0.u64);
 	rte_tel_data_add_dict_string(d, str, strw0);
 
 	snprintf(str, sizeof(str), "insa_esnh_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_hi);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.th);
 
 	snprintf(str, sizeof(str), "insa_esnl_%u", i);
-	rte_tel_data_add_dict_u64(d, str, in_sa->esn_low);
+	rte_tel_data_add_dict_u64(d, str, in_sa->common_sa.seq_t.tl);
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (13 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
                     ` (15 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add capabilities for HMAC-SHA2 and UDP encapsulation for cn9k
security offload in inline mode.
Set explicit IV mode in the IPsec context when the IV is provided
by the application.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 79 +++++++++++++++++++++++++++---
 1 file changed, 71 insertions(+), 8 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 2b2dca8b51..8c6e1c1765 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -80,6 +80,66 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* SHA256 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+			}, }
+		}, }
+	},
+	{	/* SHA384 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 24,
+					.max = 48,
+					.increment = 24
+					},
+			}, }
+		}, }
+	},
+	{	/* SHA512 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+				.block_size = 128,
+				.key_size = {
+					.min = 1,
+					.max = 1024,
+					.increment = 1
+				},
+				.digest_size = {
+					.min = 32,
+					.max = 64,
+					.increment = 32
+				},
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -91,7 +151,9 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -103,7 +165,10 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
-			.options = { 0 }
+			.options = {
+					.udp_encap = 1,
+					.iv_gen_disable = 1
+				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
@@ -337,13 +402,11 @@ cn9k_eth_sec_session_create(void *device,
 			goto mempool_put;
 		}
 
-		/* Always enable explicit IV.
-		 * Copy the IV from application only when iv_gen_disable flag is
-		 * set
+		/* When IV is provided by the application,
+		 * copy the IV to context and enable explicit IV flag in context.
 		 */
-		outb_sa->common_sa.ctl.explicit_iv_en = 1;
-
-		if (conf->ipsec.options.iv_gen_disable == 1) {
+		if (ipsec->options.iv_gen_disable == 1) {
+			outb_sa->common_sa.ctl.explicit_iv_en = 1;
 			iv_str = getenv("ETH_SEC_IV_OVR");
 			if (iv_str)
 				outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (14 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
                     ` (14 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

This patch enables aging on the CNF105 variant of the CN10K platform.
It also enables aging statistics while dumping/resetting SQ statistics.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_errata.h    |  3 +--
 drivers/common/cnxk/roc_nix_debug.c | 19 +++++++++----------
 drivers/common/cnxk/roc_nix_stats.c |  2 ++
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/drivers/common/cnxk/roc_errata.h b/drivers/common/cnxk/roc_errata.h
index 8dc372f956..d3b32f1786 100644
--- a/drivers/common/cnxk/roc_errata.h
+++ b/drivers/common/cnxk/roc_errata.h
@@ -30,8 +30,7 @@ roc_errata_npa_has_no_fc_stype_ststp(void)
 static inline bool
 roc_errata_nix_has_no_drop_aging(void)
 {
-	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0() ||
-		roc_model_is_cnf10kb_a0());
+	return (roc_model_is_cn10ka_a0() || roc_model_is_cnf10ka_a0());
 }
 
 /* Errata IPBUNIXRX-40130 */
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index efac7e5b14..bd7a5d3dc2 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -472,22 +472,21 @@ nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
 	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
 	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
-		 ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
+	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
+		 ctx->vfi_lso_vlan1_ins_ena);
 	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
-		 ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
+	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
+		 ctx->vfi_lso_sizem1);
 	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->scm_lso_rem);
+	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
 	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
 	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
-		 (uint64_t)ctx->drop_pkts);
+	nix_dump("W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
+	nix_dump("W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
+	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
diff --git a/drivers/common/cnxk/roc_nix_stats.c b/drivers/common/cnxk/roc_nix_stats.c
index 8fd5c711c3..2e5071e1bb 100644
--- a/drivers/common/cnxk/roc_nix_stats.c
+++ b/drivers/common/cnxk/roc_nix_stats.c
@@ -238,6 +238,8 @@ nix_stat_tx_queue_reset(struct nix *nix, uint16_t qid)
 		aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
 		aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
 		aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
+		aq->sq_mask.aged_drop_octs = ~(aq->sq_mask.aged_drop_octs);
+		aq->sq_mask.aged_drop_pkts = ~(aq->sq_mask.aged_drop_pkts);
 	}
 
 	rc = mbox_process(mbox);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (15 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 19/32] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
                     ` (13 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

Updated the shaper profile with a user-configurable RED algorithm.
This helps in configuring a TM node in RED drop mode versus
stall mode.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h          | 1 +
 drivers/common/cnxk/roc_nix_tm_utils.c | 7 +++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 77e4d2919b..b17623076c 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -572,6 +572,7 @@ struct roc_nix_tm_shaper_profile {
 	int32_t pkt_len_adj;
 	bool pkt_mode;
 	int8_t accuracy;
+	uint8_t red_algo;
 	/* Function to free this memory */
 	void (*free_fn)(void *profile);
 };
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index b9b605f8b1..193f9df5ff 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -1236,11 +1236,14 @@ roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
 	struct nix_tm_shaper_profile *profile;
 	struct nix_tm_shaper_data cir, pir;
 
+	if (!roc_prof)
+		return;
+
 	profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
-	tm_node->red_algo = NIX_REDALG_STD;
+	tm_node->red_algo = roc_prof->red_algo;
 
 	/* C0 doesn't support STALL when both PIR & CIR are enabled */
-	if (profile && roc_model_is_cn96_cx()) {
+	if (roc_model_is_cn96_cx()) {
 		nix_tm_shaper_conf_get(profile, &cir, &pir);
 
 		if (pir.rate && cir.rate)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 19/32] common/cnxk: add 98xx A1 platform
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (16 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
                     ` (12 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Harman Kalra

From: Harman Kalra <hkalra@marvell.com>

Adding support for 98xx A1 pass chip.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/common/cnxk/roc_model.c |  1 +
 drivers/common/cnxk/roc_model.h | 16 +++++++++++++++-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_model.c b/drivers/common/cnxk/roc_model.c
index bdbd9a96b2..04338311ec 100644
--- a/drivers/common/cnxk/roc_model.c
+++ b/drivers/common/cnxk/roc_model.c
@@ -65,6 +65,7 @@ static const struct model_db {
 	{VENDOR_ARM, PART_103xx, 0, 0, ROC_MODEL_CN103xx_A0, "cn10kb_a0"},
 	{VENDOR_ARM, PART_105xxN, 0, 0, ROC_MODEL_CNF105xxN_A0, "cnf10kb_a0"},
 	{VENDOR_CAVIUM, PART_98xx, 0, 0, ROC_MODEL_CN98xx_A0, "cn98xx_a0"},
+	{VENDOR_CAVIUM, PART_98xx, 0, 1, ROC_MODEL_CN98xx_A1, "cn98xx_a1"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 0, ROC_MODEL_CN96xx_A0, "cn96xx_a0"},
 	{VENDOR_CAVIUM, PART_96xx, 0, 1, ROC_MODEL_CN96xx_B0, "cn96xx_b0"},
 	{VENDOR_CAVIUM, PART_96xx, 2, 0, ROC_MODEL_CN96xx_C0, "cn96xx_c0"},
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index d231d44b60..57a8af06fc 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -21,6 +21,7 @@ struct roc_model {
 #define ROC_MODEL_CNF95xxN_A1  BIT_ULL(14)
 #define ROC_MODEL_CNF95xxN_B0  BIT_ULL(15)
 #define ROC_MODEL_CN98xx_A0    BIT_ULL(16)
+#define ROC_MODEL_CN98xx_A1    BIT_ULL(17)
 #define ROC_MODEL_CN106xx_A0   BIT_ULL(20)
 #define ROC_MODEL_CNF105xx_A0  BIT_ULL(21)
 #define ROC_MODEL_CNF105xxN_A0 BIT_ULL(22)
@@ -38,10 +39,11 @@ struct roc_model {
 } __plt_cache_aligned;
 
 #define ROC_MODEL_CN96xx_Ax (ROC_MODEL_CN96xx_A0 | ROC_MODEL_CN96xx_B0)
+#define ROC_MODEL_CN98xx_Ax (ROC_MODEL_CN98xx_A0 | ROC_MODEL_CN98xx_A1)
 #define ROC_MODEL_CN9K                                                         \
 	(ROC_MODEL_CN96xx_Ax | ROC_MODEL_CN96xx_C0 | ROC_MODEL_CNF95xx_A0 |    \
 	 ROC_MODEL_CNF95xx_B0 | ROC_MODEL_CNF95xxMM_A0 |                       \
-	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_A0 | \
+	 ROC_MODEL_CNF95xxO_A0 | ROC_MODEL_CNF95xxN_A0 | ROC_MODEL_CN98xx_Ax | \
 	 ROC_MODEL_CNF95xxN_A1 | ROC_MODEL_CNF95xxN_B0)
 #define ROC_MODEL_CNF9K                                                        \
 	(ROC_MODEL_CNF95xx_A0 | ROC_MODEL_CNF95xx_B0 |                         \
@@ -110,10 +112,22 @@ roc_model_is_cn10k(void)
 
 static inline uint64_t
 roc_model_is_cn98xx(void)
+{
+	return (roc_model->flag & ROC_MODEL_CN98xx_Ax);
+}
+
+static inline uint64_t
+roc_model_is_cn98xx_a0(void)
 {
 	return (roc_model->flag & ROC_MODEL_CN98xx_A0);
 }
 
+static inline uint64_t
+roc_model_is_cn98xx_a1(void)
+{
+	return (roc_model->flag & ROC_MODEL_CN98xx_A1);
+}
+
 static inline uint64_t
 roc_model_is_cn96_a0(void)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (17 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 19/32] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
                     ` (11 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable the below ciphers and auth algorithms as part of the
capabilities for inline IPsec:
AES_CTR
AES_XCBC_MAC
AES_GMAC

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 86 ++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 8c6e1c1765..81ce5dd8f2 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -10,6 +10,27 @@
 #include <cnxk_security.h>
 
 static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
+	{	/* NULL (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_NULL,
+				.block_size = 1,
+				.key_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 0,
+					.max = 0,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
+
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -60,6 +81,71 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* AES CTR */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES-XCBC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{ .sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0,
+				},
+			}, }
+		}, }
+	},
+	{	/* AES GMAC (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (18 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
                     ` (10 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ankur Dwivedi, Anoob Joseph, Tejasree Kondoj
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable 3DES-CBC cipher capability for inline IPsec
processing.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  3 +++
 drivers/crypto/cnxk/cn9k_ipsec.c    |  6 ++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 21 ++++++++++++++++++++-
 3 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 89ac900d90..a44254931e 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -1033,6 +1033,9 @@ on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
 			case RTE_CRYPTO_CIPHER_NULL:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
 				break;
+			case RTE_CRYPTO_CIPHER_3DES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_3DES_CBC;
+				break;
 			case RTE_CRYPTO_CIPHER_AES_CBC:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
 				aes_key_len = cipher_xform->cipher.key.length;
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 84915581fc..3d37449907 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -248,6 +248,12 @@ cn9k_ipsec_xform_verify(struct rte_security_ipsec_xform *ipsec,
 				plt_err("Transport mode AES-CBC AES-XCBC is not supported");
 				return -ENOTSUP;
 			}
+
+			if ((cipher->algo == RTE_CRYPTO_CIPHER_3DES_CBC) &&
+			    (auth->algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC)) {
+				plt_err("Transport mode 3DES-CBC AES-XCBC is not supported");
+				return -ENOTSUP;
+			}
 		}
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 81ce5dd8f2..39c072466f 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -30,7 +30,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, },
 		}, }
 	},
-
+	{	/* 3DES CBC  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 24,
+					.max = 24,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 8
+				}
+			}, },
+		}, }
+	},
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (19 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 23/32] common/cnxk: add support for CPT second pass Nithin Dabilpuram
                     ` (9 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

From: Satha Rao <skoteshwar@marvell.com>

CNXK platforms do not support PFC on LBK, so skip the
configuration on LBK interfaces.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     | 2 +-
 drivers/net/cnxk/cnxk_ethdev_ops.c | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 85ad70e50b..0603d73a90 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1860,7 +1860,7 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 		pfc_conf.tx_pause.rx_qid = i;
 		rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
 							      &pfc_conf);
-		if (rc)
+		if (rc && rc != -ENOTSUP)
 			plt_err("Failed to reset PFC. error code(%d)", rc);
 	}
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 1592971073..64beabdd12 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -356,8 +356,8 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
 		return -ENOTSUP;
 	}
 
-	if (roc_nix_is_sdp(nix)) {
-		plt_err("Prio flow ctrl config is not allowed on SDP");
+	if (roc_nix_is_sdp(nix) || roc_nix_is_lbk(nix)) {
+		plt_nix_dbg("Prio flow ctrl config is not allowed on SDP/LBK");
 		return -ENOTSUP;
 	}
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 23/32] common/cnxk: add support for CPT second pass
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (20 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
                     ` (8 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Added a mailbox for masking and setting nix_rq_ctx
parameters and enabling RQ masking in ipsec_cfg1 so that the
second pass is applied to all RQs.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/hw/nix.h      |  4 +-
 drivers/common/cnxk/roc_mbox.h    | 23 ++++++++-
 drivers/common/cnxk/roc_nix_inl.c | 81 +++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 5863e358e0..a5352644ca 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -1242,7 +1242,9 @@ struct nix_cn10k_rq_ctx_s {
 	uint64_t ipsech_ena : 1;
 	uint64_t ena_wqwd : 1;
 	uint64_t cq : 20;
-	uint64_t rsvd_36_24 : 13;
+	uint64_t rsvd_34_24 : 11;
+	uint64_t port_ol4_dis : 1;
+	uint64_t port_il4_dis : 1;
 	uint64_t lenerr_dis : 1;
 	uint64_t csum_il4_dis : 1;
 	uint64_t csum_ol4_dis : 1;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 912de1121b..688c70b4ee 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -265,7 +265,9 @@ struct mbox_msghdr {
 	  msg_rsp)                                                             \
 	M(NIX_RX_SW_SYNC, 0x8022, nix_rx_sw_sync, msg_req, msg_rsp)            \
 	M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg,        \
-	  msg_req, nix_inline_ipsec_cfg)
+	  msg_req, nix_inline_ipsec_cfg)				       \
+	M(NIX_LF_INLINE_RQ_CFG, 0x8024, nix_lf_inline_rq_cfg,                  \
+	  nix_rq_cpt_field_mask_cfg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES                                                   \
@@ -1088,6 +1090,25 @@ struct nix_mark_format_cfg_rsp {
 	uint8_t __io mark_format_idx;
 };
 
+struct nix_rq_cpt_field_mask_cfg_req {
+	struct mbox_msghdr hdr;
+#define RQ_CTX_MASK_MAX 6
+	union {
+		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_set;
+	};
+	union {
+		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
+		struct nix_cn10k_rq_ctx_s rq_mask;
+	};
+	struct nix_lf_rx_ipec_cfg1_req {
+		uint32_t __io spb_cpt_aura;
+		uint8_t __io rq_mask_enable;
+		uint8_t __io spb_cpt_sizem1;
+		uint8_t __io spb_cpt_enable;
+	} ipsec_cfg1;
+};
+
 struct nix_lso_format_cfg {
 	struct mbox_msghdr hdr;
 	uint64_t __io field_mask;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index be0b8066c7..cdf31b1f0c 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -416,6 +416,70 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
 	return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
 }
 
+static int
+nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct idev_nix_inl_cfg *inl_cfg;
+	uint64_t aura_handle;
+	int rc = -ENOSPC;
+	int i;
+
+	if (!idev)
+		return rc;
+
+	inl_cfg = &idev->inl_cfg;
+	msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+	if (msk_req == NULL)
+		return rc;
+
+	for (i = 0; i < RQ_CTX_MASK_MAX; i++)
+		msk_req->rq_ctx_word_mask[i] = 0xFFFFFFFFFFFFFFFF;
+
+	msk_req->rq_set.len_ol3_dis = 1;
+	msk_req->rq_set.len_ol4_dis = 1;
+	msk_req->rq_set.len_il3_dis = 1;
+
+	msk_req->rq_set.len_il4_dis = 1;
+	msk_req->rq_set.csum_ol4_dis = 1;
+	msk_req->rq_set.csum_il4_dis = 1;
+
+	msk_req->rq_set.lenerr_dis = 1;
+	msk_req->rq_set.port_ol4_dis = 1;
+	msk_req->rq_set.port_il4_dis = 1;
+
+	msk_req->rq_set.lpb_drop_ena = 0;
+	msk_req->rq_set.spb_drop_ena = 0;
+	msk_req->rq_set.xqe_drop_ena = 0;
+
+	msk_req->rq_mask.len_ol3_dis = ~(msk_req->rq_set.len_ol3_dis);
+	msk_req->rq_mask.len_ol4_dis = ~(msk_req->rq_set.len_ol4_dis);
+	msk_req->rq_mask.len_il3_dis = ~(msk_req->rq_set.len_il3_dis);
+
+	msk_req->rq_mask.len_il4_dis = ~(msk_req->rq_set.len_il4_dis);
+	msk_req->rq_mask.csum_ol4_dis = ~(msk_req->rq_set.csum_ol4_dis);
+	msk_req->rq_mask.csum_il4_dis = ~(msk_req->rq_set.csum_il4_dis);
+
+	msk_req->rq_mask.lenerr_dis = ~(msk_req->rq_set.lenerr_dis);
+	msk_req->rq_mask.port_ol4_dis = ~(msk_req->rq_set.port_ol4_dis);
+	msk_req->rq_mask.port_il4_dis = ~(msk_req->rq_set.port_il4_dis);
+
+	msk_req->rq_mask.lpb_drop_ena = ~(msk_req->rq_set.lpb_drop_ena);
+	msk_req->rq_mask.spb_drop_ena = ~(msk_req->rq_set.spb_drop_ena);
+	msk_req->rq_mask.xqe_drop_ena = ~(msk_req->rq_set.xqe_drop_ena);
+
+	aura_handle = roc_npa_zero_aura_handle();
+	msk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);
+	msk_req->ipsec_cfg1.rq_mask_enable = enable;
+	msk_req->ipsec_cfg1.spb_cpt_sizem1 = inl_cfg->buf_sz;
+	msk_req->ipsec_cfg1.spb_cpt_enable = enable;
+
+	return mbox_process(mbox);
+}
+
 int
 roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 {
@@ -472,6 +536,14 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 		nix->need_meta_aura = true;
 		idev->inl_cfg.refs++;
 	}
+
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, true);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
 	nix->inl_inb_ena = true;
 	return 0;
 }
@@ -481,6 +553,7 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 {
 	struct idev_cfg *idev = idev_get_cfg();
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	int rc;
 
 	if (!nix->inl_inb_ena)
 		return 0;
@@ -496,6 +569,14 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
 			nix_inl_meta_aura_destroy();
 	}
 
+	if (roc_model_is_cn10kb_a0()) {
+		rc = nix_inl_rq_mask_cfg(roc_nix, false);
+		if (rc) {
+			plt_err("Failed to get rq mask rc=%d", rc);
+			return rc;
+		}
+	}
+
 	/* Flush Inbound CTX cache entries */
 	roc_nix_cpt_ctx_cache_sync(roc_nix);
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (21 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 23/32] common/cnxk: add support for CPT second pass Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
                     ` (7 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

Update the CQ threshold limit associated with the SQ. This is
used when we need completions for packets that are successfully
transmitted.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       | 1 +
 drivers/common/cnxk/roc_nix_queue.c | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index b17623076c..8869cf5169 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -346,6 +346,7 @@ struct roc_nix_sq {
 	uint32_t nb_desc;
 	uint16_t qid;
 	uint16_t cqid;
+	uint16_t cq_drop_thresh;
 	bool sso_ena;
 	bool cq_ena;
 	/* End of Input parameters */
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index b197de0a77..60303329cc 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -907,6 +907,7 @@ sq_cn9k_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
@@ -1024,6 +1025,7 @@ sq_init(struct nix *nix, struct roc_nix_sq *sq, uint32_t rr_quantum,
 	aq->sq.sso_ena = !!sq->sso_ena;
 	aq->sq.cq_ena = !!sq->cq_ena;
 	aq->sq.cq = sq->cqid;
+	aq->sq.cq_limit = sq->cq_drop_thresh;
 	if (aq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
 		aq->sq.sqe_stype = NIX_STYPE_STP;
 	aq->sq.sqb_aura = roc_npa_aura_handle_to_aura(sq->aura_handle);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (22 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
                     ` (6 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

This patch adds RoC support for Tx completion events via
RQ to CQ mapping.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix.c       | 5 ++++-
 drivers/common/cnxk/roc_nix.h       | 2 ++
 drivers/common/cnxk/roc_nix_queue.c | 7 ++-----
 drivers/net/cnxk/cnxk_ethdev.c      | 3 +++
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 151d8c3426..4bb306b60e 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -154,7 +154,10 @@ roc_nix_lf_alloc(struct roc_nix *roc_nix, uint32_t nb_rxq, uint32_t nb_txq,
 		return rc;
 	req->rq_cnt = nb_rxq;
 	req->sq_cnt = nb_txq;
-	req->cq_cnt = nb_rxq;
+	if (roc_nix->tx_compl_ena)
+		req->cq_cnt = nb_rxq + nb_txq;
+	else
+		req->cq_cnt = nb_rxq;
 	/* XQESZ can be W64 or W16 */
 	req->xqe_sz = NIX_XQESZ_W16;
 	req->rss_sz = nix->reta_sz;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 8869cf5169..8cea3232d0 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -287,6 +287,7 @@ struct roc_nix_stats_queue {
 struct roc_nix_rq {
 	/* Input parameters */
 	uint16_t qid;
+	uint16_t cqid; /* Not valid when SSO is enabled */
 	uint16_t bpf_id;
 	uint64_t aura_handle;
 	bool ipsech_ena;
@@ -412,6 +413,7 @@ struct roc_nix {
 	uint16_t max_sqb_count;
 	enum roc_nix_rss_reta_sz reta_sz;
 	bool enable_loop;
+	bool tx_compl_ena;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
 	uint16_t sqb_slack;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 60303329cc..405d9a8274 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -268,7 +268,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena)
@@ -395,7 +395,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 		aq->rq.good_utag = rq->tag_mask >> 24;
 		aq->rq.bad_utag = rq->tag_mask >> 24;
 		aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
-		aq->rq.cq = rq->qid;
+		aq->rq.cq = rq->cqid;
 	}
 
 	if (rq->ipsech_ena) {
@@ -644,9 +644,6 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
 	if (cq == NULL)
 		return NIX_ERR_PARAM;
 
-	if (cq->qid >= nix->nb_rx_queues)
-		return NIX_ERR_QUEUE_INVALID_RANGE;
-
 	qsize = nix_qsize_clampup(cq->nb_desc);
 	cq->nb_desc = nix_qsize_to_val(qsize);
 	cq->qmask = cq->nb_desc - 1;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0603d73a90..4ed81c3d98 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -606,6 +606,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	/* Setup ROC RQ */
 	rq = &dev->rqs[qid];
 	rq->qid = qid;
+	rq->cqid = cq->qid;
 	rq->aura_handle = mp->pool_id;
 	rq->flow_tag_width = 32;
 	rq->sso_ena = false;
@@ -1168,6 +1169,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	if (roc_nix_is_lbk(nix))
 		nix->enable_loop = eth_dev->data->dev_conf.lpbk_mode;
 
+	nix->tx_compl_ena = 0;
+
 	/* Alloc a nix lf */
 	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
 	if (rc) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (23 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 27/32] net/cnxk: limit port specific SA table size Nithin Dabilpuram
                     ` (5 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Wait for CPT flow control on WQE path. This is to
avoid CPT queue overflow and thereby a CPT misc
interrupt.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 07c88a974e..f5205da0e2 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -233,6 +233,16 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 	}
 }
 
+static __rte_always_inline void
+cn10k_nix_sec_fc_wait_one(struct cn10k_eth_txq *txq)
+{
+	uint64_t nb_desc = txq->cpt_desc;
+	uint64_t *fc = txq->cpt_fc;
+
+	while (nb_desc <= __atomic_load_n(fc, __ATOMIC_RELAXED))
+		;
+}
+
 static __rte_always_inline void
 cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 27/32] net/cnxk: limit port specific SA table size
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (24 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
                     ` (4 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: jerinj, dev

Limit the port-specific SA table size to 1 entry when it is not used.
This is useful when an inline device is enabled, since the
port-specific SA table will then not be used for inline IPsec
inbound processing.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c         | 4 ++++
 drivers/net/cnxk/cnxk_ethdev.h         | 5 ++++-
 drivers/net/cnxk/cnxk_ethdev_devargs.c | 3 +--
 3 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 4ed81c3d98..89f8cc107d 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -103,6 +103,10 @@ nix_security_setup(struct cnxk_eth_dev *dev)
 	int i, rc = 0;
 
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+		/* Setup minimum SA table when inline device is used */
+		nix->ipsec_in_min_spi = dev->inb.no_inl_dev ? dev->inb.min_spi : 0;
+		nix->ipsec_in_max_spi = dev->inb.no_inl_dev ? dev->inb.max_spi : 1;
+
 		/* Setup Inline Inbound */
 		rc = roc_nix_inl_inb_init(nix);
 		if (rc) {
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index a4178cfeff..bed0e0eada 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -273,8 +273,11 @@ TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
 
 /* Inbound security data */
 struct cnxk_eth_dev_sec_inb {
+	/* IPSec inbound min SPI */
+	uint32_t min_spi;
+
 	/* IPSec inbound max SPI */
-	uint16_t max_spi;
+	uint32_t max_spi;
 
 	/* Using inbound with inline device */
 	bool inl_dev;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 4ded850622..d28509dbda 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -320,12 +320,11 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 null_devargs:
 	dev->scalar_ena = !!scalar_enable;
 	dev->inb.no_inl_dev = !!no_inl_dev;
+	dev->inb.min_spi = ipsec_in_min_spi;
 	dev->inb.max_spi = ipsec_in_max_spi;
 	dev->outb.max_sa = ipsec_out_max_sa;
 	dev->outb.nb_desc = outb_nb_desc;
 	dev->outb.nb_crypto_qs = outb_nb_crypto_qs;
-	dev->nix.ipsec_in_min_spi = ipsec_in_min_spi;
-	dev->nix.ipsec_in_max_spi = ipsec_in_max_spi;
 	dev->nix.ipsec_out_max_sa = ipsec_out_max_sa;
 	dev->nix.rss_tag_as_xor = !!rss_tag_as_xor;
 	dev->nix.max_sqb_count = sqb_count;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (25 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 27/32] net/cnxk: limit port specific SA table size Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5 Nithin Dabilpuram
                     ` (3 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add support for DES-CBC cipher for security offload in inline mode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  5 +++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 20 ++++++++++++++++++++
 2 files changed, 25 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index a44254931e..f25df54254 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -817,6 +817,7 @@ cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_CIPHER_AES_CTR:
 		ivlen = 8;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		ivlen = ROC_CPT_DES_BLOCK_LENGTH;
 		break;
@@ -898,6 +899,7 @@ cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_CIPHER_AES_CBC:
 		roundup_byte = 16;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 		roundup_byte = 8;
 		break;
@@ -1033,6 +1035,9 @@ on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
 			case RTE_CRYPTO_CIPHER_NULL:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
 				break;
+			case RTE_CRYPTO_CIPHER_DES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_DES_CBC;
+				break;
 			case RTE_CRYPTO_CIPHER_3DES_CBC:
 				ctl->enc_type = ROC_IE_ON_SA_ENC_3DES_CBC;
 				break;
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 39c072466f..27823f44ba 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -30,6 +30,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, },
 		}, }
 	},
+	{	/* DES  */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
+				.block_size = 8,
+				.key_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, },
+		}, }
+	},
 	{	/* 3DES CBC  */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (26 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 30/32] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
                     ` (2 subsequent siblings)
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Add support for MD5 auth algo for security offload in inline mode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c |  4 ++++
 drivers/net/cnxk/cn9k_ethdev_sec.c  | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index f25df54254..55382d3129 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -852,6 +852,7 @@ cnxk_ipsec_icvlen_get(enum rte_crypto_cipher_algorithm c_algo,
 	case RTE_CRYPTO_AUTH_NULL:
 		icv = 0;
 		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		icv = 12;
 		break;
@@ -1208,6 +1209,7 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
 	} else {
 		switch (ctl->auth_type) {
+		case ROC_IE_ON_SA_AUTH_MD5:
 		case ROC_IE_ON_SA_AUTH_SHA1:
 			template = &out_sa->sha1.template;
 			ctx_len = offsetof(struct roc_ie_on_outb_sa,
@@ -1306,6 +1308,7 @@ cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		case RTE_CRYPTO_AUTH_AES_GMAC:
 		case RTE_CRYPTO_AUTH_NULL:
 			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
 			break;
@@ -1354,6 +1357,7 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 		switch (auth_xform->auth.algo) {
 		case RTE_CRYPTO_AUTH_NULL:
 			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
 			       auth_key_len);
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 27823f44ba..6a76090403 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -185,6 +185,26 @@ static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
 			}, }
 		}, }
 	},
+	{	/* MD5 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				},
+			}, }
+		}, }
+	},
 	{	/* SHA1 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 30/32] net/cnxk: enable esn and antireplay support
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (27 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5 Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 31/32] common/cnxk: dump device basic info to file Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Vidya Sagar Velumuri

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Enable ESN and anti-replay in IPsec capabilities
Add support for session update security API
Fix the cpt command population for ESN enabled case

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 139 ++++++++++++++++++++++++++++-
 1 file changed, 137 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 6a76090403..a96a4fa8cd 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -296,8 +296,10 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
 			.options = {
-					.udp_encap = 1
+					.udp_encap = 1,
+					.esn = 1
 				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
@@ -312,7 +314,8 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 			.options = {
 					.udp_encap = 1,
-					.iv_gen_disable = 1
+					.iv_gen_disable = 1,
+					.esn = 1
 				}
 		},
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
@@ -375,6 +378,137 @@ outb_dbg_iv_update(struct roc_ie_on_common_sa *common_sa, const char *__iv_str)
 	free(iv_str);
 }
 
+static int
+cn9k_eth_sec_session_update(void *device,
+			    struct rte_security_session *sess,
+			    struct rte_security_session_conf *conf)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_security_ipsec_xform *ipsec;
+	struct cn9k_outb_priv_data *outb_priv;
+	struct cnxk_ipsec_outb_rlens *rlens;
+	struct cn9k_sec_sess_priv sess_priv;
+	struct rte_crypto_sym_xform *crypto;
+	struct cnxk_eth_sec_sess *eth_sec;
+	struct roc_ie_on_outb_sa *outb_sa;
+	rte_spinlock_t *lock;
+	char tbuf[128] = {0};
+	const char *iv_str;
+	uint32_t sa_idx;
+	int ctx_len;
+	int rc = 0;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+		return -ENOTSUP;
+
+	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+		return -ENOTSUP;
+
+	if (rte_security_dynfield_register() < 0)
+		return -ENOTSUP;
+
+	ipsec = &conf->ipsec;
+	crypto = conf->crypto_xform;
+
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+		return -ENOTSUP;
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (!eth_sec)
+		return -ENOENT;
+
+	eth_sec->spi = conf->ipsec.spi;
+
+	lock = &dev->outb.lock;
+	rte_spinlock_lock(lock);
+
+	outb_sa = eth_sec->sa;
+	outb_priv = roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(outb_sa);
+	sa_idx = outb_priv->sa_idx;
+
+	/* Disable SA */
+	outb_sa->common_sa.ctl.valid = 0;
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	sess_priv.u64 = 0;
+	memset(outb_sa, 0, sizeof(struct roc_ie_on_outb_sa));
+
+	/* Fill outbound sa params */
+	rc = cnxk_on_ipsec_outb_sa_create(ipsec, crypto, outb_sa);
+	if (rc < 0) {
+		snprintf(tbuf, sizeof(tbuf),
+			 "Failed to init outbound sa, rc=%d", rc);
+		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+		goto exit;
+	}
+
+	ctx_len = rc;
+	rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa, outb_sa, false,
+				   ctx_len);
+	if (rc) {
+		snprintf(tbuf, sizeof(tbuf),
+			 "Failed to init outbound sa, rc=%d", rc);
+		rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+		goto exit;
+	}
+
+	/* When IV is provided by the application,
+	 * copy the IV to context and enable explicit IV flag in context.
+	 */
+	if (ipsec->options.iv_gen_disable == 1) {
+		outb_sa->common_sa.ctl.explicit_iv_en = 1;
+		iv_str = getenv("ETH_SEC_IV_OVR");
+		if (iv_str)
+			outb_dbg_iv_update(&outb_sa->common_sa, iv_str);
+	}
+
+	outb_priv->userdata = conf->userdata;
+	outb_priv->eth_sec = eth_sec;
+	/* Start sequence number with 1 */
+	outb_priv->esn = ipsec->esn.value;
+
+	memcpy(&outb_priv->nonce, outb_sa->common_sa.iv.gcm.nonce, 4);
+	if (outb_sa->common_sa.ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+		outb_priv->copy_salt = 1;
+
+	rlens = &outb_priv->rlens;
+	/* Save rlen info */
+	cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
+
+	sess_priv.sa_idx = outb_priv->sa_idx;
+	sess_priv.roundup_byte = rlens->roundup_byte;
+	sess_priv.roundup_len = rlens->roundup_len;
+	sess_priv.partial_len = rlens->partial_len;
+
+	/* Pointer from eth_sec -> outb_sa */
+	eth_sec->sa = outb_sa;
+	eth_sec->sess = sess;
+	eth_sec->sa_idx = sa_idx;
+	eth_sec->spi = ipsec->spi;
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	rte_spinlock_unlock(lock);
+
+	plt_nix_dbg("Created outbound session with spi=%u, sa_idx=%u",
+		    eth_sec->spi, eth_sec->sa_idx);
+
+	/* Update fast path info in priv area.
+	 */
+	set_sec_session_private_data(sess, (void *)sess_priv.u64);
+
+	return 0;
+exit:
+	rte_spinlock_unlock(lock);
+	if (rc)
+		plt_err("%s", tbuf);
+	return rc;
+}
+
 static int
 cn9k_eth_sec_session_create(void *device,
 			    struct rte_security_session_conf *conf,
@@ -677,6 +811,7 @@ cn9k_eth_sec_ops_override(void)
 
 	/* Update platform specific ops */
 	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
+	cnxk_eth_sec_ops.session_update = cn9k_eth_sec_session_update;
 	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
 	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 31/32] common/cnxk: dump device basic info to file
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (28 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 30/32] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-12 13:14   ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
  30 siblings, 0 replies; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add a helper API to dump device basic info to a file for debug purposes.
This is used by the ethdev dump API to dump the ethdev's internal info.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/common/cnxk/roc_nix.h             |  12 +-
 drivers/common/cnxk/roc_nix_debug.c       | 726 +++++++++++-----------
 drivers/common/cnxk/roc_nix_inl.h         |   4 +-
 drivers/common/cnxk/roc_nix_inl_dev_irq.c |   6 +-
 drivers/common/cnxk/roc_nix_irq.c         |   6 +-
 drivers/common/cnxk/roc_nix_priv.h        |   2 +-
 drivers/common/cnxk/roc_nix_tm.c          |   4 +-
 7 files changed, 395 insertions(+), 365 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 8cea3232d0..5c2a869eba 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -493,13 +493,13 @@ int __roc_api roc_nix_rx_drop_re_set(struct roc_nix *roc_nix, bool ena);
 /* Debug */
 int __roc_api roc_nix_lf_get_reg_count(struct roc_nix *roc_nix);
 int __roc_api roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data);
-int __roc_api roc_nix_queues_ctx_dump(struct roc_nix *roc_nix);
+int __roc_api roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file);
 void __roc_api roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq);
-void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq);
-void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq);
-void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq);
-void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix);
-void __roc_api roc_nix_dump(struct roc_nix *roc_nix);
+void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file);
+void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file);
+void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file);
+void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file);
+void __roc_api roc_nix_dump(struct roc_nix *roc_nix, FILE *file);
 
 /* IRQ */
 void __roc_api roc_nix_rx_queue_intr_enable(struct roc_nix *roc_nix,
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index bd7a5d3dc2..6f82350b53 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -5,14 +5,27 @@
 #include "roc_api.h"
 #include "roc_priv.h"
 
-#define nix_dump plt_dump
+
+#define nix_dump(file, fmt, ...) do {                                           \
+	if ((file) == NULL)							\
+		plt_dump(fmt, ##__VA_ARGS__);					\
+	else                                                                    \
+		fprintf(file, fmt "\n", ##__VA_ARGS__);                         \
+} while (0)
+
 #define NIX_REG_INFO(reg)                                                      \
 	{                                                                      \
 		reg, #reg                                                      \
 	}
 #define NIX_REG_NAME_SZ 48
 
-#define nix_dump_no_nl plt_dump_no_nl
+#define nix_dump_no_nl(file, fmt, ...) do {                                     \
+	if ((file) == NULL)                                                     \
+		plt_dump_no_nl(fmt, ##__VA_ARGS__);				\
+	else                                                                    \
+		fprintf(file, fmt, ##__VA_ARGS__);                              \
+} while (0)
+
 
 struct nix_lf_reg_info {
 	uint32_t offset;
@@ -45,7 +58,7 @@ static const struct nix_lf_reg_info nix_lf_reg[] = {
 };
 
 static void
-nix_bitmap_dump(struct plt_bitmap *bmp)
+nix_bitmap_dump(struct plt_bitmap *bmp, FILE *file)
 {
 	uint32_t pos = 0, start_pos;
 	uint64_t slab = 0;
@@ -57,7 +70,7 @@ nix_bitmap_dump(struct plt_bitmap *bmp)
 
 	start_pos = pos;
 
-	nix_dump_no_nl("  \t\t[");
+	nix_dump_no_nl(file, "  \t\t[");
 	do {
 		if (!slab)
 			break;
@@ -65,12 +78,12 @@ nix_bitmap_dump(struct plt_bitmap *bmp)
 
 		for (i = 0; i < 64; i++)
 			if (slab & (1ULL << i))
-				nix_dump_no_nl("%d, ", i);
+				nix_dump_no_nl(file, "%d, ", i);
 
 		if (!plt_bitmap_scan(bmp, &pos, &slab))
 			break;
 	} while (start_pos != pos);
-	nix_dump_no_nl(" ]");
+	nix_dump_no_nl(file, " ]");
 }
 
 int
@@ -114,6 +127,7 @@ roc_nix_lf_get_reg_count(struct roc_nix *roc_nix)
 int
 nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data)
 {
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 	uint32_t i;
@@ -123,7 +137,7 @@ nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data)
 	for (i = 0; i < PLT_DIM(nix_lf_reg); i++) {
 		reg = plt_read64(nix_lf_base + nix_lf_reg[i].offset);
 		if (dump_stdout && reg)
-			nix_dump("%32s = 0x%" PRIx64, nix_lf_reg[i].name, reg);
+			nix_dump(file, "%32s = 0x%" PRIx64, nix_lf_reg[i].name, reg);
 		if (data)
 			*data++ = reg;
 	}
@@ -136,6 +150,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 		     uint8_t lf_rx_stats)
 {
 	uint32_t i, count = 0;
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 
@@ -145,7 +160,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 	for (i = 0; i < lf_tx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_TX_STATX(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_TX_STATX", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_TX_STATX", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -156,7 +171,7 @@ nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
 	for (i = 0; i < lf_rx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_RX_STATX(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_RX_STATX", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_RX_STATX", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -170,6 +185,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 		    uint16_t cints)
 {
 	uint32_t i, count = 0;
+	FILE *file = NULL;
 	bool dump_stdout;
 	uint64_t reg;
 
@@ -179,7 +195,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_CNT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_CNT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_CNT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -190,7 +206,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_INT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_INT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_INT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -201,7 +217,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -212,7 +228,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1C",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1C",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -223,7 +239,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_CNT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_CNT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_CNT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -234,7 +250,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_WAIT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_WAIT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_WAIT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -245,7 +261,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT", i,
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT", i,
 				 reg);
 		if (data)
 			*data++ = reg;
@@ -256,7 +272,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -267,7 +283,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1S",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1S",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -278,7 +294,7 @@ nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
 	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
-			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1C",
+			nix_dump(file, "%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1C",
 				 i, reg);
 		if (data)
 			*data++ = reg;
@@ -368,296 +384,296 @@ nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p)
 }
 
 static inline void
-nix_cn9k_lf_sq_dump(__io struct nix_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
+nix_cn9k_lf_sq_dump(__io struct nix_sq_ctx_s *ctx, uint32_t *sqb_aura_p, FILE *file)
 {
-	nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
+	nix_dump(file, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
 		 ctx->sqe_way_mask, ctx->cq);
-	nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->sdp_mcast, ctx->substream);
-	nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
+	nix_dump(file, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
 		 ctx->ena);
 
-	nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
+	nix_dump(file, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
 		 ctx->sqb_count, ctx->default_chan);
-	nix_dump("W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d",
+	nix_dump(file, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d",
 		 ctx->smq_rr_quantum, ctx->sso_ena);
-	nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
+	nix_dump(file, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
 		 ctx->xoff, ctx->cq_ena, ctx->smq);
 
-	nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
+	nix_dump(file, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
 		 ctx->sqe_stype, ctx->sq_int_ena);
-	nix_dump("W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
+	nix_dump(file, "W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
 		 ctx->sqb_aura);
-	nix_dump("W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count);
+	nix_dump(file, "W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count);
 
-	nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
+	nix_dump(file, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
 		 ctx->smq_next_sq_vld, ctx->smq_pend);
-	nix_dump("W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
+	nix_dump(file, "W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
 		 ctx->smenq_next_sqb_vld, ctx->head_offset);
-	nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
+	nix_dump(file, "W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
 		 ctx->smenq_offset, ctx->tail_offset);
-	nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
+	nix_dump(file, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
 		 ctx->smq_lso_segnum, ctx->smq_next_sq);
-	nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
+	nix_dump(file, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
 		 ctx->lmt_dis);
-	nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
+	nix_dump(file, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
 		 ctx->cq_limit, ctx->max_sqe_size);
 
-	nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
-	nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
-	nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
-	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
-	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
+	nix_dump(file, "W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
+	nix_dump(file, "W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
+	nix_dump(file, "W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
+	nix_dump(file, "W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
+	nix_dump(file, "W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
+	nix_dump(file, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
 		 ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
-	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
+	nix_dump(file, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
+	nix_dump(file, "W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
 		 ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
-	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
+	nix_dump(file, "W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
+	nix_dump(file, "W10: scm_lso_rem \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->scm_lso_rem);
-	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
+	nix_dump(file, "W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W14: dropped_octs \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
+	nix_dump(file, "W15: dropped_pkts \t\t0x%" PRIx64 "",
 		 (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
 
 static inline void
-nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p)
+nix_lf_sq_dump(__io struct nix_cn10k_sq_ctx_s *ctx, uint32_t *sqb_aura_p, FILE *file)
 {
-	nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
+	nix_dump(file, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
 		 ctx->sqe_way_mask, ctx->cq);
-	nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->sdp_mcast, ctx->substream);
-	nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
+	nix_dump(file, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n", ctx->qint_idx,
 		 ctx->ena);
 
-	nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
+	nix_dump(file, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
 		 ctx->sqb_count, ctx->default_chan);
-	nix_dump("W1: smq_rr_weight \t\t%d\nW1: sso_ena \t\t\t%d",
+	nix_dump(file, "W1: smq_rr_weight \t\t%d\nW1: sso_ena \t\t\t%d",
 		 ctx->smq_rr_weight, ctx->sso_ena);
-	nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
+	nix_dump(file, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
 		 ctx->xoff, ctx->cq_ena, ctx->smq);
 
-	nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
+	nix_dump(file, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
 		 ctx->sqe_stype, ctx->sq_int_ena);
-	nix_dump("W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
+	nix_dump(file, "W2: sq_int  \t\t\t%d\nW2: sqb_aura \t\t\t%d", ctx->sq_int,
 		 ctx->sqb_aura);
-	nix_dump("W2: smq_rr_count[ub:lb] \t\t%x:%x\n", ctx->smq_rr_count_ub,
+	nix_dump(file, "W2: smq_rr_count[ub:lb] \t\t%x:%x\n", ctx->smq_rr_count_ub,
 		 ctx->smq_rr_count_lb);
 
-	nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
+	nix_dump(file, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
 		 ctx->smq_next_sq_vld, ctx->smq_pend);
-	nix_dump("W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
+	nix_dump(file, "W3: smenq_next_sqb_vld  \t%d\nW3: head_offset\t\t\t%d",
 		 ctx->smenq_next_sqb_vld, ctx->head_offset);
-	nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
+	nix_dump(file, "W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
 		 ctx->smenq_offset, ctx->tail_offset);
-	nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
+	nix_dump(file, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
 		 ctx->smq_lso_segnum, ctx->smq_next_sq);
-	nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
+	nix_dump(file, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d", ctx->mnq_dis,
 		 ctx->lmt_dis);
-	nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
+	nix_dump(file, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
 		 ctx->cq_limit, ctx->max_sqe_size);
 
-	nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
-	nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
-	nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
-	nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
-	nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
+	nix_dump(file, "W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
+	nix_dump(file, "W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
+	nix_dump(file, "W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
+	nix_dump(file, "W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
+	nix_dump(file, "W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
 
-	nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
+	nix_dump(file, "W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d", ctx->vfi_lso_vld,
 		 ctx->vfi_lso_vlan1_ins_ena);
-	nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
+	nix_dump(file, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
 		 ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
-	nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
+	nix_dump(file, "W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d", ctx->vfi_lso_sb,
 		 ctx->vfi_lso_sizem1);
-	nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
+	nix_dump(file, "W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
 
-	nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
-	nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
-	nix_dump("W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
-	nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: scm_lso_rem \t\t0x%" PRIx64 "", (uint64_t)ctx->scm_lso_rem);
+	nix_dump(file, "W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W13: aged_drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_pkts);
+	nix_dump(file, "W13: aged_drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->aged_drop_octs);
+	nix_dump(file, "W14: dropped_octs \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W15: dropped_pkts \t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
 
 	*sqb_aura_p = ctx->sqb_aura;
 }
 
 static inline void
-nix_cn9k_lf_rq_dump(__io struct nix_rq_ctx_s *ctx)
+nix_cn9k_lf_rq_dump(__io struct nix_rq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
+	nix_dump(file, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
 		 ctx->wqe_aura, ctx->substream);
-	nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
+	nix_dump(file, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
 		 ctx->ena_wqwd);
-	nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
+	nix_dump(file, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
 		 ctx->ipsech_ena, ctx->sso_ena);
-	nix_dump("W0: ena \t\t\t%d\n", ctx->ena);
+	nix_dump(file, "W0: ena \t\t\t%d\n", ctx->ena);
 
-	nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
+	nix_dump(file, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
 		 ctx->lpb_drop_ena, ctx->spb_drop_ena);
-	nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
+	nix_dump(file, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
 		 ctx->xqe_drop_ena, ctx->wqe_caching);
-	nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
+	nix_dump(file, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
 		 ctx->pb_caching, ctx->sso_tt);
-	nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
+	nix_dump(file, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
 		 ctx->lpb_aura);
-	nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
+	nix_dump(file, "W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
 
-	nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
+	nix_dump(file, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
 		 ctx->xqe_hdr_split, ctx->xqe_imm_copy);
-	nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
+	nix_dump(file, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
 		 ctx->xqe_imm_size, ctx->later_skip);
-	nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
+	nix_dump(file, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
 		 ctx->first_skip, ctx->lpb_sizem1);
-	nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
+	nix_dump(file, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
 		 ctx->wqe_skip);
-	nix_dump("W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1);
+	nix_dump(file, "W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1);
 
-	nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
+	nix_dump(file, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
 		 ctx->spb_pool_pass, ctx->spb_pool_drop);
-	nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
+	nix_dump(file, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
 		 ctx->spb_aura_pass, ctx->spb_aura_drop);
-	nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
+	nix_dump(file, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
 		 ctx->wqe_pool_pass, ctx->wqe_pool_drop);
-	nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
+	nix_dump(file, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
 		 ctx->xqe_pass, ctx->xqe_drop);
 
-	nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
+	nix_dump(file, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
 		 ctx->qint_idx, ctx->rq_int_ena);
-	nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
+	nix_dump(file, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
 		 ctx->lpb_pool_pass);
-	nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
+	nix_dump(file, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
 		 ctx->lpb_pool_drop, ctx->lpb_aura_pass);
-	nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
+	nix_dump(file, "W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
 
-	nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
+	nix_dump(file, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
 		 ctx->flow_tagw, ctx->bad_utag);
-	nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
+	nix_dump(file, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
 		 ctx->ltag);
 
-	nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
-	nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
+	nix_dump(file, "W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
 }
 
 void
-nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx)
+nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: wqe_aura \t\t\t%d\nW0: len_ol3_dis \t\t\t%d",
+	nix_dump(file, "W0: wqe_aura \t\t\t%d\nW0: len_ol3_dis \t\t\t%d",
 		 ctx->wqe_aura, ctx->len_ol3_dis);
-	nix_dump("W0: len_ol4_dis \t\t\t%d\nW0: len_il3_dis \t\t\t%d",
+	nix_dump(file, "W0: len_ol4_dis \t\t\t%d\nW0: len_il3_dis \t\t\t%d",
 		 ctx->len_ol4_dis, ctx->len_il3_dis);
-	nix_dump("W0: len_il4_dis \t\t\t%d\nW0: csum_ol4_dis \t\t\t%d",
+	nix_dump(file, "W0: len_il4_dis \t\t\t%d\nW0: csum_ol4_dis \t\t\t%d",
 		 ctx->len_il4_dis, ctx->csum_ol4_dis);
-	nix_dump("W0: csum_ol3_dis \t\t\t%d\nW0: lenerr_dis \t\t\t%d",
+	nix_dump(file, "W0: csum_ol3_dis \t\t\t%d\nW0: lenerr_dis \t\t\t%d",
 		 ctx->csum_ol4_dis, ctx->lenerr_dis);
-	nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
+	nix_dump(file, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d", ctx->cq,
 		 ctx->ena_wqwd);
-	nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
+	nix_dump(file, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
 		 ctx->ipsech_ena, ctx->sso_ena);
-	nix_dump("W0: ena \t\t\t%d\n", ctx->ena);
+	nix_dump(file, "W0: ena \t\t\t%d\n", ctx->ena);
 
-	nix_dump("W1: chi_ena \t\t%d\nW1: ipsecd_drop_en \t\t%d", ctx->chi_ena,
+	nix_dump(file, "W1: chi_ena \t\t%d\nW1: ipsecd_drop_en \t\t%d", ctx->chi_ena,
 		 ctx->ipsecd_drop_en);
-	nix_dump("W1: pb_stashing \t\t\t%d", ctx->pb_stashing);
-	nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
+	nix_dump(file, "W1: pb_stashing \t\t\t%d", ctx->pb_stashing);
+	nix_dump(file, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
 		 ctx->lpb_drop_ena, ctx->spb_drop_ena);
-	nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
+	nix_dump(file, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
 		 ctx->xqe_drop_ena, ctx->wqe_caching);
-	nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
+	nix_dump(file, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
 		 ctx->pb_caching, ctx->sso_tt);
-	nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
+	nix_dump(file, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d", ctx->sso_grp,
 		 ctx->lpb_aura);
-	nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
+	nix_dump(file, "W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
 
-	nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
+	nix_dump(file, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
 		 ctx->xqe_hdr_split, ctx->xqe_imm_copy);
-	nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
+	nix_dump(file, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
 		 ctx->xqe_imm_size, ctx->later_skip);
-	nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
+	nix_dump(file, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
 		 ctx->first_skip, ctx->lpb_sizem1);
-	nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
+	nix_dump(file, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d", ctx->spb_ena,
 		 ctx->wqe_skip);
-	nix_dump("W2: spb_sizem1 \t\t\t%d\nW2: policer_ena \t\t\t%d",
+	nix_dump(file, "W2: spb_sizem1 \t\t\t%d\nW2: policer_ena \t\t\t%d",
 		 ctx->spb_sizem1, ctx->policer_ena);
-	nix_dump("W2: band_prof_id \t\t\t%d", ctx->band_prof_id);
+	nix_dump(file, "W2: band_prof_id \t\t\t%d", ctx->band_prof_id);
 
-	nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
+	nix_dump(file, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
 		 ctx->spb_pool_pass, ctx->spb_pool_drop);
-	nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
+	nix_dump(file, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
 		 ctx->spb_aura_pass, ctx->spb_aura_drop);
-	nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
+	nix_dump(file, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
 		 ctx->wqe_pool_pass, ctx->wqe_pool_drop);
-	nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
+	nix_dump(file, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
 		 ctx->xqe_pass, ctx->xqe_drop);
 
-	nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
+	nix_dump(file, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
 		 ctx->qint_idx, ctx->rq_int_ena);
-	nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
+	nix_dump(file, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d", ctx->rq_int,
 		 ctx->lpb_pool_pass);
-	nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
+	nix_dump(file, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
 		 ctx->lpb_pool_drop, ctx->lpb_aura_pass);
-	nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
+	nix_dump(file, "W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
 
-	nix_dump("W5: vwqe_skip \t\t\t%d\nW5: max_vsize_exp \t\t\t%d",
+	nix_dump(file, "W5: vwqe_skip \t\t\t%d\nW5: max_vsize_exp \t\t\t%d",
 		 ctx->vwqe_skip, ctx->max_vsize_exp);
-	nix_dump("W5: vtime_wait \t\t\t%d\nW5: vwqe_ena \t\t\t%d",
+	nix_dump(file, "W5: vtime_wait \t\t\t%d\nW5: vwqe_ena \t\t\t%d",
 		 ctx->vtime_wait, ctx->max_vsize_exp);
-	nix_dump("W5: ipsec_vwqe \t\t\t%d", ctx->ipsec_vwqe);
-	nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
+	nix_dump(file, "W5: ipsec_vwqe \t\t\t%d", ctx->ipsec_vwqe);
+	nix_dump(file, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
 		 ctx->flow_tagw, ctx->bad_utag);
-	nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
+	nix_dump(file, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n", ctx->good_utag,
 		 ctx->ltag);
 
-	nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
-	nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
-	nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
-	nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
-	nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
+	nix_dump(file, "W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+	nix_dump(file, "W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+	nix_dump(file, "W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+	nix_dump(file, "W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+	nix_dump(file, "W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
 }
 
 static inline void
-nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx)
+nix_lf_cq_dump(__io struct nix_cq_ctx_s *ctx, FILE *file)
 {
-	nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);
+	nix_dump(file, "W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);
 
-	nix_dump("W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr);
-	nix_dump("W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d", ctx->avg_con,
+	nix_dump(file, "W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr);
+	nix_dump(file, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d", ctx->avg_con,
 		 ctx->cint_idx);
-	nix_dump("W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d", ctx->cq_err,
+	nix_dump(file, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d", ctx->cq_err,
 		 ctx->qint_idx);
-	nix_dump("W1: bpid  \t\t\t%d\nW1: bp_ena \t\t\t%d\n", ctx->bpid,
+	nix_dump(file, "W1: bpid  \t\t\t%d\nW1: bp_ena \t\t\t%d\n", ctx->bpid,
 		 ctx->bp_ena);
 
-	nix_dump("W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
+	nix_dump(file, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
 		 ctx->update_time, ctx->avg_level);
-	nix_dump("W2: head \t\t\t%d\nW2: tail \t\t\t%d\n", ctx->head,
+	nix_dump(file, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n", ctx->head,
 		 ctx->tail);
 
-	nix_dump("W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d",
+	nix_dump(file, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d",
 		 ctx->cq_err_int_ena, ctx->cq_err_int);
-	nix_dump("W3: qsize \t\t\t%d\nW3: caching \t\t\t%d", ctx->qsize,
+	nix_dump(file, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d", ctx->qsize,
 		 ctx->caching);
-	nix_dump("W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d", ctx->substream,
+	nix_dump(file, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d", ctx->substream,
 		 ctx->ena);
-	nix_dump("W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d", ctx->drop_ena,
+	nix_dump(file, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d", ctx->drop_ena,
 		 ctx->drop);
-	nix_dump("W3: bp \t\t\t\t%d\n", ctx->bp);
+	nix_dump(file, "W3: bp \t\t\t\t%d\n", ctx->bp);
 }
 
 int
-roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
+roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	int rc = -1, q, rq = nix->nb_rx_queues;
@@ -679,9 +695,9 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get cq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d cq=%d ===============",
+		nix_dump(file, "============== port=%d cq=%d ===============",
 			 roc_nix->port_id, q);
-		nix_lf_cq_dump(ctx);
+		nix_lf_cq_dump(ctx, file);
 	}
 
 	for (q = 0; q < rq; q++) {
@@ -690,12 +706,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get rq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d rq=%d ===============",
+		nix_dump(file, "============== port=%d rq=%d ===============",
 			 roc_nix->port_id, q);
 		if (roc_model_is_cn9k())
-			nix_cn9k_lf_rq_dump(ctx);
+			nix_cn9k_lf_rq_dump(ctx, file);
 		else
-			nix_lf_rq_dump(ctx);
+			nix_lf_rq_dump(ctx, file);
 	}
 
 	for (q = 0; q < sq; q++) {
@@ -704,12 +720,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			plt_err("Failed to get sq context");
 			goto fail;
 		}
-		nix_dump("============== port=%d sq=%d ===============",
+		nix_dump(file, "============== port=%d sq=%d ===============",
 			 roc_nix->port_id, q);
 		if (roc_model_is_cn9k())
-			nix_cn9k_lf_sq_dump(ctx, &sqb_aura);
+			nix_cn9k_lf_sq_dump(ctx, &sqb_aura, file);
 		else
-			nix_lf_sq_dump(ctx, &sqb_aura);
+			nix_lf_sq_dump(ctx, &sqb_aura, file);
 
 		if (!npa_lf) {
 			plt_err("NPA LF does not exist");
@@ -730,15 +746,15 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 			continue;
 		}
 
-		nix_dump("\nSQB Aura W0: Pool addr\t\t0x%" PRIx64 "",
+		nix_dump(file, "\nSQB Aura W0: Pool addr\t\t0x%" PRIx64 "",
 			 npa_rsp->aura.pool_addr);
-		nix_dump("SQB Aura W1: ena\t\t\t%d", npa_rsp->aura.ena);
-		nix_dump("SQB Aura W2: count\t\t%" PRIx64 "",
+		nix_dump(file, "SQB Aura W1: ena\t\t\t%d", npa_rsp->aura.ena);
+		nix_dump(file, "SQB Aura W2: count\t\t%" PRIx64 "",
 			 (uint64_t)npa_rsp->aura.count);
-		nix_dump("SQB Aura W3: limit\t\t%" PRIx64 "",
+		nix_dump(file, "SQB Aura W3: limit\t\t%" PRIx64 "",
 			 (uint64_t)npa_rsp->aura.limit);
-		nix_dump("SQB Aura W3: fc_ena\t\t%d", npa_rsp->aura.fc_ena);
-		nix_dump("SQB Aura W4: fc_addr\t\t0x%" PRIx64 "\n",
+		nix_dump(file, "SQB Aura W3: fc_ena\t\t%d", npa_rsp->aura.fc_ena);
+		nix_dump(file, "SQB Aura W4: fc_addr\t\t0x%" PRIx64 "\n",
 			 npa_rsp->aura.fc_addr);
 	}
 
@@ -750,120 +766,122 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 void
 roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq)
 {
+	FILE *file = NULL;
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
 	const uint64_t *sgs = (const uint64_t *)(rx + 1);
 	int i;
 
-	nix_dump("tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
+	nix_dump(file, "tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
 		 cq->tag, cq->q, cq->node, cq->cqe_type);
 
-	nix_dump("W0: chan \t0x%x\t\tdesc_sizem1 \t%d", rx->chan,
+	nix_dump(file, "W0: chan \t0x%x\t\tdesc_sizem1 \t%d", rx->chan,
 		 rx->desc_sizem1);
-	nix_dump("W0: imm_copy \t%d\t\texpress \t%d", rx->imm_copy,
+	nix_dump(file, "W0: imm_copy \t%d\t\texpress \t%d", rx->imm_copy,
 		 rx->express);
-	nix_dump("W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d", rx->wqwd,
+	nix_dump(file, "W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d", rx->wqwd,
 		 rx->errlev, rx->errcode);
-	nix_dump("W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d",
+	nix_dump(file, "W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d",
 		 rx->latype, rx->lbtype, rx->lctype);
-	nix_dump("W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d",
+	nix_dump(file, "W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d",
 		 rx->ldtype, rx->letype, rx->lftype);
-	nix_dump("W0: lgtype \t%d \t\tlhtype \t\t%d", rx->lgtype, rx->lhtype);
+	nix_dump(file, "W0: lgtype \t%d \t\tlhtype \t\t%d", rx->lgtype, rx->lhtype);
 
-	nix_dump("W1: pkt_lenm1 \t%d", rx->pkt_lenm1);
-	nix_dump("W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d",
+	nix_dump(file, "W1: pkt_lenm1 \t%d", rx->pkt_lenm1);
+	nix_dump(file, "W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d",
 		 rx->l2m, rx->l2b, rx->l3m, rx->l3b);
-	nix_dump("W1: vtag0_valid %d\t\tvtag0_gone \t%d", rx->vtag0_valid,
+	nix_dump(file, "W1: vtag0_valid %d\t\tvtag0_gone \t%d", rx->vtag0_valid,
 		 rx->vtag0_gone);
-	nix_dump("W1: vtag1_valid %d\t\tvtag1_gone \t%d", rx->vtag1_valid,
+	nix_dump(file, "W1: vtag1_valid %d\t\tvtag1_gone \t%d", rx->vtag1_valid,
 		 rx->vtag1_gone);
-	nix_dump("W1: pkind \t%d", rx->pkind);
-	nix_dump("W1: vtag0_tci \t%d\t\tvtag1_tci \t%d", rx->vtag0_tci,
+	nix_dump(file, "W1: pkind \t%d", rx->pkind);
+	nix_dump(file, "W1: vtag0_tci \t%d\t\tvtag1_tci \t%d", rx->vtag0_tci,
 		 rx->vtag1_tci);
 
-	nix_dump("W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d",
+	nix_dump(file, "W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d",
 		 rx->laflags, rx->lbflags, rx->lcflags);
-	nix_dump("W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d",
+	nix_dump(file, "W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d",
 		 rx->ldflags, rx->leflags, rx->lfflags);
-	nix_dump("W2: lgflags \t%d\t\tlhflags \t%d", rx->lgflags, rx->lhflags);
+	nix_dump(file, "W2: lgflags \t%d\t\tlhflags \t%d", rx->lgflags, rx->lhflags);
 
-	nix_dump("W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d",
+	nix_dump(file, "W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d",
 		 rx->eoh_ptr, rx->wqe_aura, rx->pb_aura);
-	nix_dump("W3: match_id \t%d", rx->match_id);
+	nix_dump(file, "W3: match_id \t%d", rx->match_id);
 
-	nix_dump("W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d", rx->laptr,
+	nix_dump(file, "W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d", rx->laptr,
 		 rx->lbptr, rx->lcptr);
-	nix_dump("W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d", rx->ldptr,
+	nix_dump(file, "W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d", rx->ldptr,
 		 rx->leptr, rx->lfptr);
-	nix_dump("W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr);
+	nix_dump(file, "W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr);
 
-	nix_dump("W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
+	nix_dump(file, "W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
 		 rx->vtag0_ptr, rx->vtag1_ptr, rx->flow_key_alg);
 
 	for (i = 0; i < (rx->desc_sizem1 + 1) << 1; i++)
-		nix_dump("sg[%u] = %p", i, (void *)sgs[i]);
+		nix_dump(file, "sg[%u] = %p", i, (void *)sgs[i]);
 }
 
 void
-roc_nix_rq_dump(struct roc_nix_rq *rq)
+roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file)
 {
-	nix_dump("nix_rq@%p", rq);
-	nix_dump("  qid = %d", rq->qid);
-	nix_dump("  aura_handle = 0x%" PRIx64 "", rq->aura_handle);
-	nix_dump("  ipsec_ena = %d", rq->ipsech_ena);
-	nix_dump("  first_skip = %d", rq->first_skip);
-	nix_dump("  later_skip = %d", rq->later_skip);
-	nix_dump("  lpb_size = %d", rq->lpb_size);
-	nix_dump("  sso_ena = %d", rq->sso_ena);
-	nix_dump("  tag_mask = %d", rq->tag_mask);
-	nix_dump("  flow_tag_width = %d", rq->flow_tag_width);
-	nix_dump("  tt = %d", rq->tt);
-	nix_dump("  hwgrp = %d", rq->hwgrp);
-	nix_dump("  vwqe_ena = %d", rq->vwqe_ena);
-	nix_dump("  vwqe_first_skip = %d", rq->vwqe_first_skip);
-	nix_dump("  vwqe_max_sz_exp = %d", rq->vwqe_max_sz_exp);
-	nix_dump("  vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
-	nix_dump("  vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
-	nix_dump("  roc_nix = %p", rq->roc_nix);
-	nix_dump("  inl_dev_refs = %d", rq->inl_dev_refs);
+	nix_dump(file, "nix_rq@%p", rq);
+	nix_dump(file, "  qid = %d", rq->qid);
+	nix_dump(file, "  aura_handle = 0x%" PRIx64 "", rq->aura_handle);
+	nix_dump(file, "  ipsec_ena = %d", rq->ipsech_ena);
+	nix_dump(file, "  first_skip = %d", rq->first_skip);
+	nix_dump(file, "  later_skip = %d", rq->later_skip);
+	nix_dump(file, "  lpb_size = %d", rq->lpb_size);
+	nix_dump(file, "  sso_ena = %d", rq->sso_ena);
+	nix_dump(file, "  tag_mask = %d", rq->tag_mask);
+	nix_dump(file, "  flow_tag_width = %d", rq->flow_tag_width);
+	nix_dump(file, "  tt = %d", rq->tt);
+	nix_dump(file, "  hwgrp = %d", rq->hwgrp);
+	nix_dump(file, "  vwqe_ena = %d", rq->vwqe_ena);
+	nix_dump(file, "  vwqe_first_skip = %d", rq->vwqe_first_skip);
+	nix_dump(file, "  vwqe_max_sz_exp = %d", rq->vwqe_max_sz_exp);
+	nix_dump(file, "  vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
+	nix_dump(file, "  vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
+	nix_dump(file, "  roc_nix = %p", rq->roc_nix);
+	nix_dump(file, "  inl_dev_refs = %d", rq->inl_dev_refs);
 }
 
 void
-roc_nix_cq_dump(struct roc_nix_cq *cq)
+roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file)
 {
-	nix_dump("nix_cq@%p", cq);
-	nix_dump("  qid = %d", cq->qid);
-	nix_dump("  qnb_desc = %d", cq->nb_desc);
-	nix_dump("  roc_nix = %p", cq->roc_nix);
-	nix_dump("  door = 0x%" PRIx64 "", cq->door);
-	nix_dump("  status = %p", cq->status);
-	nix_dump("  wdata = 0x%" PRIx64 "", cq->wdata);
-	nix_dump("  desc_base = %p", cq->desc_base);
-	nix_dump("  qmask = 0x%" PRIx32 "", cq->qmask);
+	nix_dump(file, "nix_cq@%p", cq);
+	nix_dump(file, "  qid = %d", cq->qid);
+	nix_dump(file, "  qnb_desc = %d", cq->nb_desc);
+	nix_dump(file, "  roc_nix = %p", cq->roc_nix);
+	nix_dump(file, "  door = 0x%" PRIx64 "", cq->door);
+	nix_dump(file, "  status = %p", cq->status);
+	nix_dump(file, "  wdata = 0x%" PRIx64 "", cq->wdata);
+	nix_dump(file, "  desc_base = %p", cq->desc_base);
+	nix_dump(file, "  qmask = 0x%" PRIx32 "", cq->qmask);
 }
 
 void
-roc_nix_sq_dump(struct roc_nix_sq *sq)
+roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file)
 {
-	nix_dump("nix_sq@%p", sq);
-	nix_dump("  qid = %d", sq->qid);
-	nix_dump("  max_sqe_sz = %d", sq->max_sqe_sz);
-	nix_dump("  nb_desc = %d", sq->nb_desc);
-	nix_dump("  sqes_per_sqb_log2 = %d", sq->sqes_per_sqb_log2);
-	nix_dump("  roc_nix= %p", sq->roc_nix);
-	nix_dump("  aura_handle = 0x%" PRIx64 "", sq->aura_handle);
-	nix_dump("  nb_sqb_bufs_adj = %d", sq->nb_sqb_bufs_adj);
-	nix_dump("  nb_sqb_bufs = %d", sq->nb_sqb_bufs);
-	nix_dump("  io_addr = 0x%" PRIx64 "", sq->io_addr);
-	nix_dump("  lmt_addr = %p", sq->lmt_addr);
-	nix_dump("  sqe_mem = %p", sq->sqe_mem);
-	nix_dump("  fc = %p", sq->fc);
+	nix_dump(file, "nix_sq@%p", sq);
+	nix_dump(file, "  qid = %d", sq->qid);
+	nix_dump(file, "  max_sqe_sz = %d", sq->max_sqe_sz);
+	nix_dump(file, "  nb_desc = %d", sq->nb_desc);
+	nix_dump(file, "  sqes_per_sqb_log2 = %d", sq->sqes_per_sqb_log2);
+	nix_dump(file, "  roc_nix= %p", sq->roc_nix);
+	nix_dump(file, "  aura_handle = 0x%" PRIx64 "", sq->aura_handle);
+	nix_dump(file, "  nb_sqb_bufs_adj = %d", sq->nb_sqb_bufs_adj);
+	nix_dump(file, "  nb_sqb_bufs = %d", sq->nb_sqb_bufs);
+	nix_dump(file, "  io_addr = 0x%" PRIx64 "", sq->io_addr);
+	nix_dump(file, "  lmt_addr = %p", sq->lmt_addr);
+	nix_dump(file, "  sqe_mem = %p", sq->sqe_mem);
+	nix_dump(file, "  fc = %p", sq->fc);
 };
 
 static uint8_t
 nix_tm_reg_dump_prep(uint16_t hw_lvl, uint16_t schq, uint16_t link,
 		     uint64_t *reg, char regstr[][NIX_REG_NAME_SZ])
 {
+	FILE *file = NULL;
 	uint8_t k = 0;
 
 	switch (hw_lvl) {
@@ -1022,7 +1040,7 @@ nix_tm_reg_dump_prep(uint16_t hw_lvl, uint16_t schq, uint16_t link,
 	}
 
 	if (k > MAX_REGS_PER_MBOX_MSG) {
-		nix_dump("\t!!!NIX TM Registers request overflow!!!");
+		nix_dump(file, "\t!!!NIX TM Registers request overflow!!!");
 		return 0;
 	}
 	return k;
@@ -1040,6 +1058,7 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 	struct nix_tm_node *root = NULL;
 	uint32_t schq, parent_schq;
 	bool found = false;
+	FILE *file = NULL;
 	uint8_t j, k, rc;
 
 	TAILQ_FOREACH(node, list, node) {
@@ -1067,7 +1086,7 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 			parent_lvlstr = nix_tm_hwlvl2str(node->hw_lvl + 1);
 		}
 
-		nix_dump("\t(%p%s) %s_%d->%s_%d", node,
+		nix_dump(file, "\t(%p%s) %s_%d->%s_%d", node,
 			 node->child_realloc ? "[CR]" : "", lvlstr, schq,
 			 parent_lvlstr, parent_schq);
 
@@ -1092,15 +1111,15 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 		rc = mbox_process_msg(mbox, (void **)&rsp);
 		if (!rc) {
 			for (j = 0; j < k; j++)
-				nix_dump("\t\t%s=0x%016" PRIx64, regstr[j],
+				nix_dump(file, "\t\t%s=0x%016" PRIx64, regstr[j],
 					 rsp->regval[j]);
 		} else {
-			nix_dump("\t!!!Failed to dump registers!!!");
+			nix_dump(file, "\t!!!Failed to dump registers!!!");
 		}
 	}
 
 	if (found)
-		nix_dump("\n");
+		nix_dump(file, "\n");
 
 	/* Dump TL1 node data when root level is TL2 */
 	if (root && root->hw_lvl == NIX_TXSCH_LVL_TL2) {
@@ -1117,171 +1136,182 @@ nix_tm_dump_lvl(struct nix *nix, struct nix_tm_node_list *list, uint8_t hw_lvl)
 		rc = mbox_process_msg(mbox, (void **)&rsp);
 		if (!rc) {
 			for (j = 0; j < k; j++)
-				nix_dump("\t\t%s=0x%016" PRIx64, regstr[j],
+				nix_dump(file, "\t\t%s=0x%016" PRIx64, regstr[j],
 					 rsp->regval[j]);
 		} else {
-			nix_dump("\t!!!Failed to dump registers!!!");
+			nix_dump(file, "\t!!!Failed to dump registers!!!");
 		}
-		nix_dump("\n");
+		nix_dump(file, "\n");
 	}
 }
 
 void
-roc_nix_tm_dump(struct roc_nix *roc_nix)
+roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct dev *dev = &nix->dev;
 	uint8_t hw_lvl, i;
 
-	nix_dump("===TM hierarchy and registers dump of %s (pf:vf) (%d:%d)===",
+	nix_dump(file, "===TM hierarchy and registers dump of %s (pf:vf) (%d:%d)===",
 		 nix->pci_dev->name, dev_get_pf(dev->pf_func),
 		 dev_get_vf(dev->pf_func));
 
 	/* Dump all trees */
 	for (i = 0; i < ROC_NIX_TM_TREE_MAX; i++) {
-		nix_dump("\tTM %s:", nix_tm_tree2str(i));
+		nix_dump(file, "\tTM %s:", nix_tm_tree2str(i));
 		for (hw_lvl = 0; hw_lvl <= NIX_TXSCH_LVL_CNT; hw_lvl++)
 			nix_tm_dump_lvl(nix, &nix->trees[i], hw_lvl);
 	}
 
 	/* Dump unused resources */
-	nix_dump("\tTM unused resources:");
+	nix_dump(file, "\tTM unused resources:");
 	hw_lvl = NIX_TXSCH_LVL_SMQ;
 	for (; hw_lvl < NIX_TXSCH_LVL_CNT; hw_lvl++) {
-		nix_dump("\t\ttxschq        %7s num = %d",
+		nix_dump(file, "\t\ttxschq        %7s num = %d",
 			 nix_tm_hwlvl2str(hw_lvl),
 			 nix_tm_resource_avail(nix, hw_lvl, false));
 
-		nix_bitmap_dump(nix->schq_bmp[hw_lvl]);
-		nix_dump("\n");
+		nix_bitmap_dump(nix->schq_bmp[hw_lvl], file);
+		nix_dump(file, "\n");
 
-		nix_dump("\t\ttxschq_contig %7s num = %d",
+		nix_dump(file, "\t\ttxschq_contig %7s num = %d",
 			 nix_tm_hwlvl2str(hw_lvl),
 			 nix_tm_resource_avail(nix, hw_lvl, true));
-		nix_bitmap_dump(nix->schq_contig_bmp[hw_lvl]);
-		nix_dump("\n");
+		nix_bitmap_dump(nix->schq_contig_bmp[hw_lvl], file);
+		nix_dump(file, "\n");
 	}
 }
 
 void
-roc_nix_dump(struct roc_nix *roc_nix)
+roc_nix_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct dev *dev = &nix->dev;
 	int i;
 
-	nix_dump("nix@%p", nix);
-	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
-	nix_dump("  vf = %d", dev_get_vf(dev->pf_func));
-	nix_dump("  bar2 = 0x%" PRIx64, dev->bar2);
-	nix_dump("  bar4 = 0x%" PRIx64, dev->bar4);
-	nix_dump("  port_id = %d", roc_nix->port_id);
-	nix_dump("  rss_tag_as_xor = %d", roc_nix->rss_tag_as_xor);
-	nix_dump("  rss_tag_as_xor = %d", roc_nix->max_sqb_count);
-	nix_dump("  outb_nb_desc = %u", roc_nix->outb_nb_desc);
+	nix_dump(file, "nix@%p", nix);
+	nix_dump(file, "  pf = %d", dev_get_pf(dev->pf_func));
+	nix_dump(file, "  vf = %d", dev_get_vf(dev->pf_func));
+	nix_dump(file, "  bar2 = 0x%" PRIx64, dev->bar2);
+	nix_dump(file, "  bar4 = 0x%" PRIx64, dev->bar4);
+	nix_dump(file, "  port_id = %d", roc_nix->port_id);
+	nix_dump(file, "  rss_tag_as_xor = %d", roc_nix->rss_tag_as_xor);
+	nix_dump(file, "  rss_tag_as_xor = %d", roc_nix->max_sqb_count);
+	nix_dump(file, "  outb_nb_desc = %u", roc_nix->outb_nb_desc);
 
-	nix_dump("  \tpci_dev = %p", nix->pci_dev);
-	nix_dump("  \tbase = 0x%" PRIxPTR "", nix->base);
-	nix_dump("  \tlmt_base = 0x%" PRIxPTR "", nix->lmt_base);
-	nix_dump("  \treta_size = %d", nix->reta_sz);
-	nix_dump("  \ttx_chan_base = %d", nix->tx_chan_base);
-	nix_dump("  \trx_chan_base = %d", nix->rx_chan_base);
-	nix_dump("  \tnb_rx_queues = %d", nix->nb_rx_queues);
-	nix_dump("  \tnb_tx_queues = %d", nix->nb_tx_queues);
-	nix_dump("  \tlso_tsov6_idx = %d", nix->lso_tsov6_idx);
-	nix_dump("  \tlso_tsov4_idx = %d", nix->lso_tsov4_idx);
-	nix_dump("  \tlso_udp_tun_v4v4 = %d",
+	nix_dump(file, "  \tpci_dev = %p", nix->pci_dev);
+	nix_dump(file, "  \tbase = 0x%" PRIxPTR "", nix->base);
+	nix_dump(file, "  \tlmt_base = 0x%" PRIxPTR "", nix->lmt_base);
+	nix_dump(file, "  \treta_size = %d", nix->reta_sz);
+	nix_dump(file, "  \ttx_chan_base = %d", nix->tx_chan_base);
+	nix_dump(file, "  \trx_chan_base = %d", nix->rx_chan_base);
+	nix_dump(file, "  \tnb_rx_queues = %d", nix->nb_rx_queues);
+	nix_dump(file, "  \tnb_tx_queues = %d", nix->nb_tx_queues);
+	nix_dump(file, "  \tlso_tsov6_idx = %d", nix->lso_tsov6_idx);
+	nix_dump(file, "  \tlso_tsov4_idx = %d", nix->lso_tsov4_idx);
+	nix_dump(file, "  \tlso_udp_tun_v4v4 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V4]);
-	nix_dump("  \tlso_udp_tun_v4v6 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v4v6 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V6]);
-	nix_dump("  \tlso_udp_tun_v6v4 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v6v4 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V4]);
-	nix_dump("  \tlso_udp_tun_v6v6 = %d",
+	nix_dump(file, "  \tlso_udp_tun_v6v6 = %d",
 		 nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V6]);
-	nix_dump("  \tlso_tun_v4v4 = %d",
+	nix_dump(file, "  \tlso_tun_v4v4 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V4]);
-	nix_dump("  \tlso_tun_v4v6 = %d",
+	nix_dump(file, "  \tlso_tun_v4v6 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V6]);
-	nix_dump("  \tlso_tun_v6v4 = %d",
+	nix_dump(file, "  \tlso_tun_v6v4 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V4]);
-	nix_dump("  \tlso_tun_v6v6 = %d",
+	nix_dump(file, "  \tlso_tun_v6v6 = %d",
 		 nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V6]);
-	nix_dump("  \tlf_rx_stats = %d", nix->lf_rx_stats);
-	nix_dump("  \tlf_tx_stats = %d", nix->lf_tx_stats);
-	nix_dump("  \trx_chan_cnt = %d", nix->rx_chan_cnt);
-	nix_dump("  \ttx_chan_cnt = %d", nix->tx_chan_cnt);
-	nix_dump("  \tcgx_links = %d", nix->cgx_links);
-	nix_dump("  \tlbk_links = %d", nix->lbk_links);
-	nix_dump("  \tsdp_links = %d", nix->sdp_links);
-	nix_dump("  \ttx_link = %d", nix->tx_link);
-	nix_dump("  \tsqb_size = %d", nix->sqb_size);
-	nix_dump("  \tmsixoff = %d", nix->msixoff);
+	nix_dump(file, "  \tlf_rx_stats = %d", nix->lf_rx_stats);
+	nix_dump(file, "  \tlf_tx_stats = %d", nix->lf_tx_stats);
+	nix_dump(file, "  \trx_chan_cnt = %d", nix->rx_chan_cnt);
+	nix_dump(file, "  \ttx_chan_cnt = %d", nix->tx_chan_cnt);
+	nix_dump(file, "  \tcgx_links = %d", nix->cgx_links);
+	nix_dump(file, "  \tlbk_links = %d", nix->lbk_links);
+	nix_dump(file, "  \tsdp_links = %d", nix->sdp_links);
+	nix_dump(file, "  \ttx_link = %d", nix->tx_link);
+	nix_dump(file, "  \tsqb_size = %d", nix->sqb_size);
+	nix_dump(file, "  \tmsixoff = %d", nix->msixoff);
 	for (i = 0; i < nix->nb_cpt_lf; i++)
-		nix_dump("  \tcpt_msixoff[%d] = %d", i, nix->cpt_msixoff[i]);
-	nix_dump("  \tcints = %d", nix->cints);
-	nix_dump("  \tqints = %d", nix->qints);
-	nix_dump("  \tsdp_link = %d", nix->sdp_link);
-	nix_dump("  \tptp_en = %d", nix->ptp_en);
-	nix_dump("  \trss_alg_idx = %d", nix->rss_alg_idx);
-	nix_dump("  \ttx_pause = %d", nix->tx_pause);
-	nix_dump("  \tinl_inb_ena = %d", nix->inl_inb_ena);
-	nix_dump("  \tinl_outb_ena = %d", nix->inl_outb_ena);
-	nix_dump("  \tinb_sa_base = 0x%p", nix->inb_sa_base);
-	nix_dump("  \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz);
-	nix_dump("  \toutb_sa_base = 0x%p", nix->outb_sa_base);
-	nix_dump("  \toutb_sa_sz = %" PRIu64, nix->outb_sa_sz);
-	nix_dump("  \toutb_err_sso_pffunc = 0x%x", nix->outb_err_sso_pffunc);
-	nix_dump("  \tcpt_lf_base = 0x%p", nix->cpt_lf_base);
-	nix_dump("  \tnb_cpt_lf = %d", nix->nb_cpt_lf);
-	nix_dump("  \tinb_inl_dev = %d", nix->inb_inl_dev);
+		nix_dump(file, "  \tcpt_msixoff[%d] = %d", i, nix->cpt_msixoff[i]);
+	nix_dump(file, "  \tcints = %d", nix->cints);
+	nix_dump(file, "  \tqints = %d", nix->qints);
+	nix_dump(file, "  \tsdp_link = %d", nix->sdp_link);
+	nix_dump(file, "  \tptp_en = %d", nix->ptp_en);
+	nix_dump(file, "  \trss_alg_idx = %d", nix->rss_alg_idx);
+	nix_dump(file, "  \ttx_pause = %d", nix->tx_pause);
+	nix_dump(file, "  \tinl_inb_ena = %d", nix->inl_inb_ena);
+	nix_dump(file, "  \tinl_outb_ena = %d", nix->inl_outb_ena);
+	nix_dump(file, "  \tinb_sa_base = 0x%p", nix->inb_sa_base);
+	nix_dump(file, "  \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz);
+	nix_dump(file, "  \toutb_sa_base = 0x%p", nix->outb_sa_base);
+	nix_dump(file, "  \toutb_sa_sz = %" PRIu64, nix->outb_sa_sz);
+	nix_dump(file, "  \toutb_err_sso_pffunc = 0x%x", nix->outb_err_sso_pffunc);
+	nix_dump(file, "  \tcpt_lf_base = 0x%p", nix->cpt_lf_base);
+	nix_dump(file, "  \tnb_cpt_lf = %d", nix->nb_cpt_lf);
+	nix_dump(file, "  \tinb_inl_dev = %d", nix->inb_inl_dev);
+
 }
 
 void
-roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
+roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev, FILE *file)
 {
-	struct nix_inl_dev *inl_dev =
-		(struct nix_inl_dev *)&roc_inl_dev->reserved;
-	struct dev *dev = &inl_dev->dev;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+	struct dev *dev = NULL;
 	int i;
 
-	nix_dump("nix_inl_dev@%p", inl_dev);
-	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
-	nix_dump("  vf = %d", dev_get_vf(dev->pf_func));
-	nix_dump("  bar2 = 0x%" PRIx64, dev->bar2);
-	nix_dump("  bar4 = 0x%" PRIx64, dev->bar4);
+	if (roc_inl_dev) {
+		inl_dev = (struct nix_inl_dev *)&roc_inl_dev->reserved;
+	} else {
+		if (idev && idev->nix_inl_dev)
+			inl_dev = idev->nix_inl_dev;
+		else
+			return;
+	}
 
-	nix_dump("  \tpci_dev = %p", inl_dev->pci_dev);
-	nix_dump("  \tnix_base = 0x%" PRIxPTR "", inl_dev->nix_base);
-	nix_dump("  \tsso_base = 0x%" PRIxPTR "", inl_dev->sso_base);
-	nix_dump("  \tssow_base = 0x%" PRIxPTR "", inl_dev->ssow_base);
-	nix_dump("  \tnix_msixoff = %d", inl_dev->nix_msixoff);
-	nix_dump("  \tsso_msixoff = %d", inl_dev->sso_msixoff);
-	nix_dump("  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
-	nix_dump("  \tnix_cints = %d", inl_dev->cints);
-	nix_dump("  \tnix_qints = %d", inl_dev->qints);
-	nix_dump("  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
-	nix_dump("  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
-	nix_dump("  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
-	nix_dump("  \txae_waes = %u", inl_dev->xae_waes);
-	nix_dump("  \tiue = %u", inl_dev->iue);
-	nix_dump("  \txaq_aura = 0x%" PRIx64, inl_dev->xaq.aura_handle);
-	nix_dump("  \txaq_mem = 0x%p", inl_dev->xaq.mem);
+	dev = &inl_dev->dev;
+	nix_dump(file, "nix_inl_dev@%p", inl_dev);
+	nix_dump(file, "  pf = %d", dev_get_pf(dev->pf_func));
+	nix_dump(file, "  vf = %d", dev_get_vf(dev->pf_func));
+	nix_dump(file, "  bar2 = 0x%" PRIx64, dev->bar2);
+	nix_dump(file, "  bar4 = 0x%" PRIx64, dev->bar4);
 
-	nix_dump("  \tinl_dev_rq:");
+	nix_dump(file, "  \tpci_dev = %p", inl_dev->pci_dev);
+	nix_dump(file, "  \tnix_base = 0x%" PRIxPTR "", inl_dev->nix_base);
+	nix_dump(file, "  \tsso_base = 0x%" PRIxPTR "", inl_dev->sso_base);
+	nix_dump(file, "  \tssow_base = 0x%" PRIxPTR "", inl_dev->ssow_base);
+	nix_dump(file, "  \tnix_msixoff = %d", inl_dev->nix_msixoff);
+	nix_dump(file, "  \tsso_msixoff = %d", inl_dev->sso_msixoff);
+	nix_dump(file, "  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
+	nix_dump(file, "  \tnix_cints = %d", inl_dev->cints);
+	nix_dump(file, "  \tnix_qints = %d", inl_dev->qints);
+	nix_dump(file, "  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
+	nix_dump(file, "  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
+	nix_dump(file, "  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
+	nix_dump(file, "  \txae_waes = %u", inl_dev->xae_waes);
+	nix_dump(file, "  \tiue = %u", inl_dev->iue);
+	nix_dump(file, "  \txaq_aura = 0x%" PRIx64, inl_dev->xaq.aura_handle);
+	nix_dump(file, "  \txaq_mem = 0x%p", inl_dev->xaq.mem);
+
+	nix_dump(file, "  \tinl_dev_rq:");
 	for (i = 0; i < inl_dev->nb_rqs; i++)
-		roc_nix_rq_dump(&inl_dev->rqs[i]);
+		roc_nix_rq_dump(&inl_dev->rqs[i], file);
 }
 
 void
-roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix)
+roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct roc_cpt_lf *lf_base = nix->cpt_lf_base;
 	int i;
 
-	nix_dump("nix@%p", nix);
+	nix_dump(file, "nix@%p", nix);
 	for (i = 0; i < nix->nb_cpt_lf; i++) {
-		nix_dump("NIX inline dev outbound CPT LFs:");
+		nix_dump(file, "NIX inline dev outbound CPT LFs:");
 		cpt_lf_print(&lf_base[i]);
 	}
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 555cb28c1a..019cf6d28b 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -195,7 +195,7 @@ struct roc_nix_inl_dev {
 /* NIX Inline Device API */
 int __roc_api roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev);
 int __roc_api roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev);
-void __roc_api roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev);
+void __roc_api roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev, FILE *file);
 bool __roc_api roc_nix_inl_dev_is_probed(void);
 void __roc_api roc_nix_inl_dev_lock(void);
 void __roc_api roc_nix_inl_dev_unlock(void);
@@ -257,6 +257,6 @@ int __roc_api roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 				  enum roc_nix_inl_sa_sync_op op);
 int __roc_api roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr,
 				    void *sa_cptr, bool inb, uint16_t sa_len);
-void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file);
 
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 5c19bc33fc..445b440447 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -230,7 +230,7 @@ nix_inl_nix_q_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
@@ -262,7 +262,7 @@ nix_inl_nix_ras_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
@@ -295,7 +295,7 @@ nix_inl_nix_err_irq(void *param)
 			plt_err("Failed to get rq %d context, rc=%d", q, rc);
 			continue;
 		}
-		nix_lf_rq_dump(ctx);
+		nix_lf_rq_dump(ctx, NULL);
 	}
 }
 
diff --git a/drivers/common/cnxk/roc_nix_irq.c b/drivers/common/cnxk/roc_nix_irq.c
index 71971ef261..d72980fb18 100644
--- a/drivers/common/cnxk/roc_nix_irq.c
+++ b/drivers/common/cnxk/roc_nix_irq.c
@@ -76,7 +76,7 @@ nix_lf_err_irq(void *param)
 	plt_write64(intr, nix->base + NIX_LF_ERR_INT);
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 static int
@@ -125,7 +125,7 @@ nix_lf_ras_irq(void *param)
 
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 static int
@@ -320,7 +320,7 @@ nix_lf_q_irq(void *param)
 
 	/* Dump registers to std out */
 	roc_nix_lf_reg_dump(nix_priv_to_roc_nix(nix), NULL);
-	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix));
+	roc_nix_queues_ctx_dump(nix_priv_to_roc_nix(nix), NULL);
 }
 
 int
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a253f412de..2eba44c248 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -455,7 +455,7 @@ struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
 void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);
 
 uint64_t nix_get_blkaddr(struct dev *dev);
-void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx);
+void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx, FILE *file);
 int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
 int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
 			 uint8_t lf_tx_stats, uint8_t lf_rx_stats);
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 81d491a3fd..81fa6b1d93 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -606,8 +606,8 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
 
 	return 0;
 exit:
-	roc_nix_tm_dump(sq->roc_nix);
-	roc_nix_queues_ctx_dump(sq->roc_nix);
+	roc_nix_tm_dump(sq->roc_nix, NULL);
+	roc_nix_queues_ctx_dump(sq->roc_nix, NULL);
 	return -EFAULT;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* [PATCH v3 32/32] net/cnxk: dumps device private information
  2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
                     ` (29 preceding siblings ...)
  2022-09-12 13:14   ` [PATCH v3 31/32] common/cnxk: dump device basic info to file Nithin Dabilpuram
@ 2022-09-12 13:14   ` Nithin Dabilpuram
  2022-09-16 11:36     ` Jerin Jacob
  30 siblings, 1 reply; 89+ messages in thread
From: Nithin Dabilpuram @ 2022-09-12 13:14 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: jerinj, dev, Rakesh Kudurumalla

From: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Add support for ethdev private data dump callback for
debugging purposes.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  1 +
 drivers/net/cnxk/cnxk_ethdev.h     |  1 +
 drivers/net/cnxk/cnxk_ethdev_ops.c | 29 +++++++++++++++++++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 89f8cc107d..48d6bedb89 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1682,6 +1682,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
 	.tm_ops_get = cnxk_nix_tm_ops_get,
 	.mtr_ops_get = cnxk_nix_mtr_ops_get,
+	.eth_dev_priv_dump  = cnxk_nix_eth_dev_priv_dump,
 };
 
 static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index bed0e0eada..c09e9bff8e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -585,6 +585,7 @@ int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 			     struct rte_eth_rss_conf *rss_conf);
 int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
 			       struct rte_eth_rss_conf *rss_conf);
+int cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file);
 
 /* Link */
 void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 64beabdd12..0a8b36342a 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -931,6 +931,35 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+int
+cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int i;
+
+	roc_nix_dump(roc_nix, file);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		roc_nix_rq_dump(&dev->rqs[i], file);
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+		roc_nix_cq_dump(&dev->cqs[i], file);
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+		roc_nix_sq_dump(&dev->sqs[i], file);
+
+	roc_nix_queues_ctx_dump(roc_nix, file);
+
+	roc_nix_tm_dump(roc_nix, file);
+
+	roc_nix_inl_dev_dump(NULL, file);
+
+	roc_nix_inl_outb_cpt_lfs_dump(roc_nix, file);
+
+	return 0;
+}
+
 int
 cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
 			 struct rte_eth_rss_conf *rss_conf)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 89+ messages in thread

* Re: [PATCH v3 32/32] net/cnxk: dumps device private information
  2022-09-12 13:14   ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
@ 2022-09-16 11:36     ` Jerin Jacob
  0 siblings, 0 replies; 89+ messages in thread
From: Jerin Jacob @ 2022-09-16 11:36 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, jerinj, dev,
	Rakesh Kudurumalla

On Mon, Sep 12, 2022 at 6:48 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
>
> Add support for ethdev private data dump callback for
> debugging purposes.
>
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>

Updated the git commits in the series as needed and applied series to
dpdk-next-net-mrvl/for-next-net. Thanks



> ---
>  drivers/net/cnxk/cnxk_ethdev.c     |  1 +
>  drivers/net/cnxk/cnxk_ethdev.h     |  1 +
>  drivers/net/cnxk/cnxk_ethdev_ops.c | 29 +++++++++++++++++++++++++++++
>  3 files changed, 31 insertions(+)
>
> diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
> index 89f8cc107d..48d6bedb89 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.c
> +++ b/drivers/net/cnxk/cnxk_ethdev.c
> @@ -1682,6 +1682,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
>         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
>         .tm_ops_get = cnxk_nix_tm_ops_get,
>         .mtr_ops_get = cnxk_nix_mtr_ops_get,
> +       .eth_dev_priv_dump  = cnxk_nix_eth_dev_priv_dump,
>  };
>
>  static int
> diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
> index bed0e0eada..c09e9bff8e 100644
> --- a/drivers/net/cnxk/cnxk_ethdev.h
> +++ b/drivers/net/cnxk/cnxk_ethdev.h
> @@ -585,6 +585,7 @@ int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
>                              struct rte_eth_rss_conf *rss_conf);
>  int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
>                                struct rte_eth_rss_conf *rss_conf);
> +int cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file);
>
>  /* Link */
>  void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
> diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
> index 64beabdd12..0a8b36342a 100644
> --- a/drivers/net/cnxk/cnxk_ethdev_ops.c
> +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
> @@ -931,6 +931,35 @@ cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
>         return rc;
>  }
>
> +int
> +cnxk_nix_eth_dev_priv_dump(struct rte_eth_dev *eth_dev, FILE *file)
> +{
> +       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
> +       struct roc_nix *roc_nix = &dev->nix;
> +       int i;
> +
> +       roc_nix_dump(roc_nix, file);
> +
> +       for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> +               roc_nix_rq_dump(&dev->rqs[i], file);
> +
> +       for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> +               roc_nix_cq_dump(&dev->cqs[i], file);
> +
> +       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +               roc_nix_sq_dump(&dev->sqs[i], file);
> +
> +       roc_nix_queues_ctx_dump(roc_nix, file);
> +
> +       roc_nix_tm_dump(roc_nix, file);
> +
> +       roc_nix_inl_dev_dump(NULL, file);
> +
> +       roc_nix_inl_outb_cpt_lfs_dump(roc_nix, file);
> +
> +       return 0;
> +}
> +
>  int
>  cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
>                          struct rte_eth_rss_conf *rss_conf)
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 89+ messages in thread

end of thread, other threads:[~2022-09-16 11:36 UTC | newest]

Thread overview: 89+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 08/23] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 09/23] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 11/23] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 13/23] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 15/23] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 17/23] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 18/23] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 20/23] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 21/23] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 22/23] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-08-30  4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
2022-08-30  5:16   ` [EXT] " Nithin Kumar Dabilpuram
2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-05 13:31   ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 09/31] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 18/31] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 22/31] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 26/31] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 29/31] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 30/31] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-05 13:32   ` [PATCH v2 31/31] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
2022-09-12 13:13   ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-12 13:13   ` [PATCH v3 03/32] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-12 13:13   ` [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-12 13:13   ` [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-12 13:13   ` [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 10/32] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 19/32] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 23/32] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 27/32] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 30/32] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 31/32] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-12 13:14   ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-16 11:36     ` Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).