From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>,
	Pavan Nikhilesh <pbhagavatula@marvell.com>,
	"Shijith Thotton" <sthotton@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path
Date: Mon, 12 Sep 2022 18:43:55 +0530
Message-ID: <20220912131425.1973415-2-ndabilpuram@marvell.com>
In-Reply-To: <20220912131425.1973415-1-ndabilpuram@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add FC check in vector event Tx path. The check needs to be
performed after the head wait, right before the LMTST is issued.
Since SQB pool FC updates are delayed w.r.t. the actual pool
utilization, add sufficient slack to avoid overflow.
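
The check mirrors what cn10k_nix_vwqe_wait_fc() in the diff below
computes; the following is a minimal standalone sketch of the
availability math, with field names borrowed from the driver and
the wrapper struct assumed purely for illustration:

    #include <stdint.h>

    /* Illustrative view of the txq fields involved (assumed layout) */
    struct txq_fc_view {
    	int64_t *fc_mem;          /* SQB usage count updated by HW */
    	uint16_t nb_sqb_bufs_adj; /* SQB budget after threshold/slack */
    	uint16_t sqes_per_sqb_log2;
    };

    /* SQEs that can still be issued without overflowing the SQB pool;
     * a result <= 0 means the LMTST must be held back. */
    static inline int64_t
    txq_fc_avail(const struct txq_fc_view *txq)
    {
    	int64_t used = __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED);

    	return (txq->nb_sqb_bufs_adj - used) << txq->sqes_per_sqb_log2;
    }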

Add a new device argument to override the default SQB slack
configured; it can be used as follows:

    -a 0002:02:00.0,sqb_slack=32
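
To see how the slack feeds into the pool size, here is a worked
sketch of the sizing done by sqb_pool_populate() in the diff below;
the descriptor count, SQEs-per-SQB value, and slack used here are
assumptions picked for illustration, not values from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define NIX_SQB_LIST_SPACE 2 /* extra SQBs kept for the SQB list */

    static uint64_t
    sqb_count_with_slack(uint32_t nb_desc, uint32_t sqes_per_sqb,
    			 uint16_t sqb_slack)
    {
    	uint64_t nb_sqb_bufs = nb_desc / sqes_per_sqb + NIX_SQB_LIST_SPACE;

    	/* Slack absorbs the window where the aura FC count lags the
    	 * real pool occupancy, so in-flight submissions cannot
    	 * overflow the pool. */
    	return nb_sqb_bufs + sqb_slack;
    }

    int main(void)
    {
    	/* 4096 descriptors, 64 SQEs per SQB (assumed), slack of 32 */
    	printf("%lu SQBs\n",
    	       (unsigned long)sqb_count_with_slack(4096, 64, 32));
    	return 0;
    }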

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/nics/cnxk.rst                 | 12 +++++++
 drivers/common/cnxk/roc_nix.h            |  7 ++--
 drivers/common/cnxk/roc_nix_priv.h       |  1 -
 drivers/common/cnxk/roc_nix_queue.c      | 21 +++++------
 drivers/common/cnxk/roc_nix_tm.c         |  2 +-
 drivers/common/cnxk/roc_nix_tm_ops.c     |  4 +--
 drivers/event/cnxk/cn10k_eventdev.c      |  3 +-
 drivers/event/cnxk/cn9k_eventdev.c       |  3 +-
 drivers/event/cnxk/cn9k_worker.h         |  4 +++
 drivers/event/cnxk/cnxk_eventdev_adptr.c |  9 ++---
 drivers/net/cnxk/cn10k_tx.h              | 46 ++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_ethdev_devargs.c   |  8 ++++-
 12 files changed, 97 insertions(+), 23 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index e24eaa8bc4..eeaa3fa1cc 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -157,6 +157,18 @@ Runtime Config Options
    With the above configuration, each send queue's descriptor buffer count is
    limited to a maximum of 64 buffers.
 
+- ``SQB slack count`` (default ``12``)
+
+   Send queue descriptor slack count added to the SQB count when a Tx queue
+   is created. It can be set using the ``sqb_slack`` ``devargs`` parameter.
+
+   For example::
+
+      -a 0002:02:00.0,sqb_slack=32
+
+   With the above configuration, each send queue's descriptor buffer count is
+   increased by 32 while keeping the queue limit at its default value.
+
 - ``Switch header enable`` (default ``none``)
 
    A port can be configured to a specific switch header type by using
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 4671f80e7c..c9aaedc915 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -13,6 +13,8 @@
 #define ROC_NIX_BPF_STATS_MAX	      12
 #define ROC_NIX_MTR_ID_INVALID	      UINT32_MAX
 #define ROC_NIX_PFC_CLASS_INVALID     UINT8_MAX
+#define ROC_NIX_SQB_LOWER_THRESH      70U
+#define ROC_NIX_SQB_SLACK	      12U
 
 enum roc_nix_rss_reta_sz {
 	ROC_NIX_RSS_RETA_SZ_64 = 64,
@@ -410,19 +412,20 @@ struct roc_nix {
 	bool enable_loop;
 	bool hw_vlan_ins;
 	uint8_t lock_rx_ctx;
-	uint32_t outb_nb_desc;
+	uint16_t sqb_slack;
 	uint16_t outb_nb_crypto_qs;
+	uint32_t outb_nb_desc;
 	uint32_t ipsec_in_min_spi;
 	uint32_t ipsec_in_max_spi;
 	uint32_t ipsec_out_max_sa;
 	bool ipsec_out_sso_pffunc;
+	bool custom_sa_action;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
 	bool io_enabled;
 	bool rx_ptp_ena;
 	uint16_t cints;
-	bool custom_sa_action;
 
 #define ROC_NIX_MEM_SZ (6 * 1024)
 	uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5b0522c8cb..a3d4ddf5d5 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -13,7 +13,6 @@
 #define NIX_DEF_SQB	     ((uint16_t)16)
 #define NIX_MIN_SQB	     ((uint16_t)8)
 #define NIX_SQB_LIST_SPACE   ((uint16_t)2)
-#define NIX_SQB_LOWER_THRESH ((uint16_t)70)
 
 /* Apply BP/DROP when CQ is 95% full */
 #define NIX_CQ_THRESH_LEVEL	(5 * 256 / 100)
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index fa4c954631..692b13415a 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -682,12 +682,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	else
 		sqes_per_sqb = (blk_sz / 8) / 8;
 
-	sq->nb_desc = PLT_MAX(256U, sq->nb_desc);
+	sq->nb_desc = PLT_MAX(512U, sq->nb_desc);
 	nb_sqb_bufs = sq->nb_desc / sqes_per_sqb;
 	nb_sqb_bufs += NIX_SQB_LIST_SPACE;
 	/* Clamp up the SQB count */
 	nb_sqb_bufs = PLT_MIN(roc_nix->max_sqb_count,
-			      (uint16_t)PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
+			      PLT_MAX(NIX_DEF_SQB, nb_sqb_bufs));
 
 	sq->nb_sqb_bufs = nb_sqb_bufs;
 	sq->sqes_per_sqb_log2 = (uint16_t)plt_log2_u32(sqes_per_sqb);
@@ -695,8 +695,9 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		nb_sqb_bufs -
 		(PLT_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb);
 	sq->nb_sqb_bufs_adj =
-		(sq->nb_sqb_bufs_adj * NIX_SQB_LOWER_THRESH) / 100;
+		(sq->nb_sqb_bufs_adj * ROC_NIX_SQB_LOWER_THRESH) / 100;
 
+	nb_sqb_bufs += roc_nix->sqb_slack;
 	/* Explicitly set nat_align alone as by default pool is with both
 	 * nat_align and buf_offset = 1 which we don't want for SQB.
 	 */
@@ -711,12 +712,12 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 		aura.fc_stype = 0x3; /* STSTP */
 	aura.fc_addr = (uint64_t)sq->fc;
 	aura.fc_hyst_bits = 0; /* Store count on all updates */
-	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, NIX_MAX_SQB, &aura,
+	rc = roc_npa_pool_create(&sq->aura_handle, blk_sz, nb_sqb_bufs, &aura,
 				 &pool);
 	if (rc)
 		goto fail;
 
-	sq->sqe_mem = plt_zmalloc(blk_sz * NIX_MAX_SQB, blk_sz);
+	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
 		goto nomem;
@@ -724,21 +725,21 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 
 	/* Fill the initial buffers */
 	iova = (uint64_t)sq->sqe_mem;
-	for (count = 0; count < NIX_MAX_SQB; count++) {
+	for (count = 0; count < nb_sqb_bufs; count++) {
 		roc_npa_aura_op_free(sq->aura_handle, 0, iova);
 		iova += blk_sz;
 	}
 
-	if (roc_npa_aura_op_available_wait(sq->aura_handle, NIX_MAX_SQB, 0) !=
-	    NIX_MAX_SQB) {
+	if (roc_npa_aura_op_available_wait(sq->aura_handle, nb_sqb_bufs, 0) !=
+	    nb_sqb_bufs) {
 		plt_err("Failed to free all pointers to the pool");
 		rc = NIX_ERR_NO_MEM;
 		goto npa_fail;
 	}
 
 	roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
-	roc_npa_aura_limit_modify(sq->aura_handle, sq->nb_sqb_bufs);
-	sq->aura_sqb_bufs = NIX_MAX_SQB;
+	roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
+	sq->aura_sqb_bufs = nb_sqb_bufs;
 
 	return rc;
 npa_fail:
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index a31abded1a..81d491a3fd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -594,7 +594,7 @@ roc_nix_tm_sq_flush_spin(struct roc_nix_sq *sq)
 
 		/* SQ reached quiescent state */
 		if (sqb_cnt <= 1 && head_off == tail_off &&
-		    (*(volatile uint64_t *)sq->fc == sq->nb_sqb_bufs)) {
+		    (*(volatile uint64_t *)sq->fc == sq->aura_sqb_bufs)) {
 			break;
 		}
 
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 4aa55002fe..7036495ad8 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -67,7 +67,7 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 	if (enable)
 		*(volatile uint64_t *)sq->fc = rsp->aura.count;
 	else
-		*(volatile uint64_t *)sq->fc = sq->nb_sqb_bufs;
+		*(volatile uint64_t *)sq->fc = sq->aura_sqb_bufs;
 	/* Sync write barrier */
 	plt_wmb();
 	return 0;
@@ -535,7 +535,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		tail_off = (val >> 28) & 0x3F;
 
 		if (sqb_cnt > 1 || head_off != tail_off ||
-		    (*(uint64_t *)sq->fc != sq->nb_sqb_bufs))
+		    (*(uint64_t *)sq->fc != sq->aura_sqb_bufs))
 			plt_err("Failed to gracefully flush sq %u", sq->qid);
 	}
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 0be7ebfe29..fee01713b4 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -812,7 +812,8 @@ cn10k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 8ade30f84b..992a2a555c 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -1043,7 +1043,8 @@ cn9k_sso_txq_fc_update(const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
 			sq->nb_sqb_bufs_adj -= (cnxk_eth_dev->outb.nb_desc /
 						(sqes_per_sqb - 1));
 		txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
-		txq->nb_sqb_bufs_adj = (70 * txq->nb_sqb_bufs_adj) / 100;
+		txq->nb_sqb_bufs_adj =
+			(ROC_NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 54b3545022..d86cb94a77 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -761,6 +761,10 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
 	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
+	if (((txq->nb_sqb_bufs_adj -
+	      __atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
+	     << txq->sqes_per_sqb_log2) <= 0)
+		return 0;
 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
 	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
 			      txq->mark_fmt);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index b4fd821912..7937cadd25 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -351,14 +351,15 @@ cnxk_sso_sqb_aura_limit_edit(struct roc_nix_sq *sq, uint16_t nb_sqb_bufs)
 {
 	int rc;
 
-	if (sq->nb_sqb_bufs != nb_sqb_bufs) {
+	if (sq->aura_sqb_bufs != nb_sqb_bufs) {
 		rc = roc_npa_aura_limit_modify(
 			sq->aura_handle,
 			RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs));
 		if (rc < 0)
 			return rc;
 
-		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs);
+		sq->nb_sqb_bufs = RTE_MIN(nb_sqb_bufs, sq->aura_sqb_bufs) -
+				  sq->roc_nix->sqb_slack;
 	}
 	return 0;
 }
@@ -556,7 +557,7 @@ cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
 	} else {
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, txq);
 		if (ret < 0)
@@ -588,7 +589,7 @@ cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
 							     i);
 	} else {
 		sq = &cnxk_eth_dev->sqs[tx_queue_id];
-		cnxk_sso_sqb_aura_limit_edit(sq, sq->nb_sqb_bufs);
+		cnxk_sso_sqb_aura_limit_edit(sq, sq->aura_sqb_bufs);
 		ret = cnxk_sso_updt_tx_queue_data(
 			event_dev, eth_dev->data->port_id, tx_queue_id, NULL);
 		if (ret < 0)
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index ea13866b20..8056510589 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -54,6 +54,31 @@
 
 #define NIX_NB_SEGS_TO_SEGDW(x) ((NIX_SEGDW_MAGIC >> ((x) << 2)) & 0xF)
 
+static __plt_always_inline void
+cn10k_nix_vwqe_wait_fc(struct cn10k_eth_txq *txq, int64_t req)
+{
+	int64_t cached, refill;
+
+retry:
+	while (__atomic_load_n(&txq->fc_cache_pkts, __ATOMIC_RELAXED) < 0)
+		;
+	cached = __atomic_sub_fetch(&txq->fc_cache_pkts, req, __ATOMIC_ACQUIRE);
+	/* Check if there is enough space, else update and retry. */
+	if (cached < 0) {
+		/* Check if we have space else retry. */
+		do {
+			refill =
+				(txq->nb_sqb_bufs_adj -
+				 __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
+				<< txq->sqes_per_sqb_log2;
+		} while (refill <= 0);
+		__atomic_compare_exchange(&txq->fc_cache_pkts, &cached, &refill,
+					  0, __ATOMIC_RELEASE,
+					  __ATOMIC_RELAXED);
+		goto retry;
+	}
+}
+
 /* Function to determine no of tx subdesc required in case ext
  * sub desc is enabled.
  */
@@ -1039,6 +1064,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= (15ULL << 12);
 		data |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 
@@ -1048,6 +1075,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 17)) << 12;
 		data |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data, pa);
 	} else if (burst) {
@@ -1057,6 +1086,8 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
 		data |= ((uint64_t)(burst - 1)) << 12;
 		data |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data, pa);
 	}
@@ -1188,6 +1219,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= (15ULL << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, 16);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 
@@ -1197,6 +1230,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data1 |= ((uint64_t)(burst - 17)) << 12;
 		data1 |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst - 16);
 		/* STEOR1 */
 		roc_lmt_submit_steorl(data1, pa1);
 	} else if (burst) {
@@ -1207,6 +1242,8 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
 		data0 |= ((burst - 1) << 12);
 		data0 |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(data0, pa0);
 	}
@@ -2735,6 +2772,9 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= (15ULL << 12);
 		wd.data[0] |= (uint64_t)lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				cn10k_nix_pkts_per_vec_brst(flags) >> 1);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 
@@ -2750,6 +2790,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[1] |= ((uint64_t)(lnum - 17)) << 12;
 		wd.data[1] |= (uint64_t)(lmt_id + 16);
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq,
+				burst - (cn10k_nix_pkts_per_vec_brst(flags) >>
+					 1));
 		/* STEOR1 */
 		roc_lmt_submit_steorl(wd.data[1], pa);
 	} else if (lnum) {
@@ -2765,6 +2809,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		wd.data[0] |= ((uint64_t)(lnum - 1)) << 12;
 		wd.data[0] |= lmt_id;
 
+		if (flags & NIX_TX_VWQE_F)
+			cn10k_nix_vwqe_wait_fc(txq, burst);
 		/* STEOR0 */
 		roc_lmt_submit_steorl(wd.data[0], pa);
 	}
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 248582e1f6..4ded850622 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -246,6 +246,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)
 #define CNXK_SDP_CHANNEL_MASK	"sdp_channel_mask"
 #define CNXK_FLOW_PRE_L2_INFO	"flow_pre_l2_info"
 #define CNXK_CUSTOM_SA_ACT	"custom_sa_act"
+#define CNXK_SQB_SLACK		"sqb_slack"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -254,6 +255,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB;
 	struct flow_pre_l2_size_info pre_l2_info;
 	uint32_t ipsec_in_max_spi = BIT(8) - 1;
+	uint16_t sqb_slack = ROC_NIX_SQB_SLACK;
 	uint32_t ipsec_out_max_sa = BIT(12);
 	uint16_t flow_prealloc_size = 1;
 	uint16_t switch_header_type = 0;
@@ -311,6 +313,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 			   &parse_pre_l2_hdr_info, &pre_l2_info);
 	rte_kvargs_process(kvlist, CNXK_CUSTOM_SA_ACT, &parse_flag,
 			   &custom_sa_act);
+	rte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count,
+			   &sqb_slack);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -328,6 +332,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	dev->nix.reta_sz = reta_sz;
 	dev->nix.lock_rx_ctx = lock_rx_ctx;
 	dev->nix.custom_sa_action = custom_sa_act;
+	dev->nix.sqb_slack = sqb_slack;
 	dev->npc.flow_prealloc_size = flow_prealloc_size;
 	dev->npc.flow_max_priority = flow_max_priority;
 	dev->npc.switch_header_type = switch_header_type;
@@ -356,4 +361,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
 			      CNXK_NO_INL_DEV "=0"
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
-			      CNXK_CUSTOM_SA_ACT "=1");
+			      CNXK_CUSTOM_SA_ACT "=1"
+			      CNXK_SQB_SLACK "=<12-512>");
-- 
2.25.1


