DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface
Date: Fri, 1 Oct 2021 19:09:58 +0530	[thread overview]
Message-ID: <20211001134022.22700-5-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20211001134022.22700-1-ndabilpuram@marvell.com>

Change the NIX debug API and queue API interface so that they can be
reused by the internal NIX inline device initialization.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.c       |   2 +-
 drivers/common/cnxk/roc_nix_debug.c | 118 +++++++++++++++++++++++++++---------
 drivers/common/cnxk/roc_nix_priv.h  |  16 +++++
 drivers/common/cnxk/roc_nix_queue.c |  89 +++++++++++++++------------
 4 files changed, 159 insertions(+), 66 deletions(-)
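
The register dump is split into three internal helpers
(nix_lf_gen_reg_dump(), nix_lf_stat_reg_dump() and nix_lf_int_reg_dump())
that take a raw LF base address and explicit counts instead of a struct
nix pointer. As a rough illustration only (not part of the patch), an
internal consumer such as the inline device code could compose them along
these lines; the container struct below is hypothetical and the include
list just follows the usual roc_*.c convention:

/*
 * Hedged sketch: composes the new internal dump helpers declared in
 * roc_nix_priv.h. Only the nix_lf_*_reg_dump() signatures come from
 * this patch; everything else here is illustrative.
 */
#include "roc_api.h"
#include "roc_priv.h"

struct inline_dev_regs {        /* hypothetical container */
	uintptr_t nix_base;     /* NIX LF register base */
	uint8_t lf_tx_stats;    /* number of NIX_LF_TX_STATX registers */
	uint8_t lf_rx_stats;    /* number of NIX_LF_RX_STATX registers */
	uint16_t qints;         /* number of queue interrupts */
	uint16_t cints;         /* number of completion interrupts */
};

static int
inline_dev_reg_dump(struct inline_dev_regs *idev, uint64_t *data)
{
	int i;

	/* General LF registers; passing data == NULL makes the helpers
	 * print to stdout instead of filling the array.
	 */
	i = nix_lf_gen_reg_dump(idev->nix_base, data);

	/* Rx/Tx statistics registers */
	i += nix_lf_stat_reg_dump(idev->nix_base, data ? &data[i] : NULL,
				  idev->lf_tx_stats, idev->lf_rx_stats);

	/* Queue and completion interrupt registers */
	i += nix_lf_int_reg_dump(idev->nix_base, data ? &data[i] : NULL,
				 idev->qints, idev->cints);

	return i;       /* total number of registers visited */
}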

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index ee9e81d..b7ef843 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -306,7 +306,7 @@ sdp_lbk_id_update(struct plt_pci_device *pci_dev, struct nix *nix)
 	}
 }
 
-static inline uint64_t
+uint64_t
 nix_get_blkaddr(struct dev *dev)
 {
 	uint64_t reg;
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 6e56513..9539bb9 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -110,17 +110,12 @@ roc_nix_lf_get_reg_count(struct roc_nix *roc_nix)
 }
 
 int
-roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
+nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data)
 {
-	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
-	uintptr_t nix_lf_base = nix->base;
 	bool dump_stdout;
 	uint64_t reg;
 	uint32_t i;
 
-	if (roc_nix == NULL)
-		return NIX_ERR_PARAM;
-
 	dump_stdout = data ? 0 : 1;
 
 	for (i = 0; i < PLT_DIM(nix_lf_reg); i++) {
@@ -131,8 +126,21 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 			*data++ = reg;
 	}
 
+	return i;
+}
+
+int
+nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint8_t lf_tx_stats,
+		     uint8_t lf_rx_stats)
+{
+	uint32_t i, count = 0;
+	bool dump_stdout;
+	uint64_t reg;
+
+	dump_stdout = data ? 0 : 1;
+
 	/* NIX_LF_TX_STATX */
-	for (i = 0; i < nix->lf_tx_stats; i++) {
+	for (i = 0; i < lf_tx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_TX_STATX(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_TX_STATX", i,
@@ -140,9 +148,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_RX_STATX */
-	for (i = 0; i < nix->lf_rx_stats; i++) {
+	for (i = 0; i < lf_rx_stats; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_RX_STATX(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_RX_STATX", i,
@@ -151,8 +160,21 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 			*data++ = reg;
 	}
 
+	return count + i;
+}
+
+int
+nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
+		    uint16_t cints)
+{
+	uint32_t i, count = 0;
+	bool dump_stdout;
+	uint64_t reg;
+
+	dump_stdout = data ? 0 : 1;
+
 	/* NIX_LF_QINTX_CNT*/
-	for (i = 0; i < nix->qints; i++) {
+	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_CNT(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_CNT", i,
@@ -160,9 +182,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_QINTX_INT */
-	for (i = 0; i < nix->qints; i++) {
+	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_INT(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_INT", i,
@@ -170,9 +193,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_QINTX_ENA_W1S */
-	for (i = 0; i < nix->qints; i++) {
+	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1S",
@@ -180,9 +204,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_QINTX_ENA_W1C */
-	for (i = 0; i < nix->qints; i++) {
+	for (i = 0; i < qints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_QINTX_ENA_W1C",
@@ -190,9 +215,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_CNT */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_CNT(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_CNT", i,
@@ -200,9 +226,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_WAIT */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_WAIT(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_WAIT", i,
@@ -210,9 +237,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_INT */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT", i,
@@ -220,9 +248,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_INT_W1S */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_INT_W1S(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_INT_W1S",
@@ -230,9 +259,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_ENA_W1S */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1S(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1S",
@@ -240,9 +270,10 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+	count += i;
 
 	/* NIX_LF_CINTX_ENA_W1C */
-	for (i = 0; i < nix->cints; i++) {
+	for (i = 0; i < cints; i++) {
 		reg = plt_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1C(i));
 		if (dump_stdout && reg)
 			nix_dump("%32s_%d = 0x%" PRIx64, "NIX_LF_CINTX_ENA_W1C",
@@ -250,12 +281,40 @@ roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
 		if (data)
 			*data++ = reg;
 	}
+
+	return count + i;
+}
+
+int
+roc_nix_lf_reg_dump(struct roc_nix *roc_nix, uint64_t *data)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool dump_stdout = data ? 0 : 1;
+	uintptr_t nix_base;
+	uint32_t i;
+
+	if (roc_nix == NULL)
+		return NIX_ERR_PARAM;
+
+	nix_base = nix->base;
+	/* General registers */
+	i = nix_lf_gen_reg_dump(nix_base, data);
+
+	/* Rx, Tx stat registers */
+	i += nix_lf_stat_reg_dump(nix_base, dump_stdout ? NULL : &data[i],
+				  nix->lf_tx_stats, nix->lf_rx_stats);
+
+	/* Intr registers */
+	i += nix_lf_int_reg_dump(nix_base, dump_stdout ? NULL : &data[i],
+				 nix->qints, nix->cints);
+
 	return 0;
 }
 
-static int
-nix_q_ctx_get(struct mbox *mbox, uint8_t ctype, uint16_t qid, __io void **ctx_p)
+int
+nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid, __io void **ctx_p)
 {
+	struct mbox *mbox = dev->mbox;
 	int rc;
 
 	if (roc_model_is_cn9k()) {
@@ -485,7 +544,7 @@ nix_cn9k_lf_rq_dump(__io struct nix_rq_ctx_s *ctx)
 	nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
 }
 
-static inline void
+void
 nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx)
 {
 	nix_dump("W0: wqe_aura \t\t\t%d\nW0: len_ol3_dis \t\t\t%d",
@@ -595,12 +654,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	int rc = -1, q, rq = nix->nb_rx_queues;
-	struct mbox *mbox = (&nix->dev)->mbox;
 	struct npa_aq_enq_rsp *npa_rsp;
 	struct npa_aq_enq_req *npa_aq;
-	volatile void *ctx;
+	struct dev *dev = &nix->dev;
 	int sq = nix->nb_tx_queues;
 	struct npa_lf *npa_lf;
+	volatile void *ctx;
 	uint32_t sqb_aura;
 
 	npa_lf = idev_npa_obj_get();
@@ -608,7 +667,7 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 		return NPA_ERR_DEVICE_NOT_BOUNDED;
 
 	for (q = 0; q < rq; q++) {
-		rc = nix_q_ctx_get(mbox, NIX_AQ_CTYPE_CQ, q, &ctx);
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_CQ, q, &ctx);
 		if (rc) {
 			plt_err("Failed to get cq context");
 			goto fail;
@@ -619,7 +678,7 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 	}
 
 	for (q = 0; q < rq; q++) {
-		rc = nix_q_ctx_get(mbox, NIX_AQ_CTYPE_RQ, q, &ctx);
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
 		if (rc) {
 			plt_err("Failed to get rq context");
 			goto fail;
@@ -633,7 +692,7 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix)
 	}
 
 	for (q = 0; q < sq; q++) {
-		rc = nix_q_ctx_get(mbox, NIX_AQ_CTYPE_SQ, q, &ctx);
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, &ctx);
 		if (rc) {
 			plt_err("Failed to get sq context");
 			goto fail;
@@ -686,11 +745,13 @@ roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq)
 {
 	const union nix_rx_parse_u *rx =
 		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
+	const uint64_t *sgs = (const uint64_t *)(rx + 1);
+	int i;
 
 	nix_dump("tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
 		 cq->tag, cq->q, cq->node, cq->cqe_type);
 
-	nix_dump("W0: chan \t%d\t\tdesc_sizem1 \t%d", rx->chan,
+	nix_dump("W0: chan \t0x%x\t\tdesc_sizem1 \t%d", rx->chan,
 		 rx->desc_sizem1);
 	nix_dump("W0: imm_copy \t%d\t\texpress \t%d", rx->imm_copy,
 		 rx->express);
@@ -731,6 +792,9 @@ roc_nix_cqe_dump(const struct nix_cqe_hdr_s *cq)
 
 	nix_dump("W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
 		 rx->vtag0_ptr, rx->vtag1_ptr, rx->flow_key_alg);
+
+	for (i = 0; i < (rx->desc_sizem1 + 1) << 1; i++)
+		nix_dump("sg[%u] = %p", i, (void *)sgs[i]);
 }
 
 void
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index b573879..b140dad 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -352,6 +352,12 @@ int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
 int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
 			 bool rr_quantum_only);
 
+int nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+		    bool cfg, bool ena);
+int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+	       bool ena);
+int nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable);
+
 /*
  * TM priv utils.
  */
@@ -397,4 +403,14 @@ void nix_tm_node_free(struct nix_tm_node *node);
 struct nix_tm_shaper_profile *nix_tm_shaper_profile_alloc(void);
 void nix_tm_shaper_profile_free(struct nix_tm_shaper_profile *profile);
 
+uint64_t nix_get_blkaddr(struct dev *dev);
+void nix_lf_rq_dump(__io struct nix_cn10k_rq_ctx_s *ctx);
+int nix_lf_gen_reg_dump(uintptr_t nix_lf_base, uint64_t *data);
+int nix_lf_stat_reg_dump(uintptr_t nix_lf_base, uint64_t *data,
+			 uint8_t lf_tx_stats, uint8_t lf_rx_stats);
+int nix_lf_int_reg_dump(uintptr_t nix_lf_base, uint64_t *data, uint16_t qints,
+			uint16_t cints);
+int nix_q_ctx_get(struct dev *dev, uint8_t ctype, uint16_t qid,
+		  __io void **ctx_p);
+
 #endif /* _ROC_NIX_PRIV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index d7c4844..cff0ec3 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -29,46 +29,54 @@ nix_qsize_clampup(uint32_t val)
 }
 
 int
+nix_rq_ena_dis(struct dev *dev, struct roc_nix_rq *rq, bool enable)
+{
+	struct mbox *mbox = dev->mbox;
+
+	/* Pkts will be dropped silently if RQ is disabled */
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_WRITE;
+
+		aq->rq.ena = enable;
+		aq->rq_mask.ena = ~(aq->rq_mask.ena);
+	} else {
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_WRITE;
+
+		aq->rq.ena = enable;
+		aq->rq_mask.ena = ~(aq->rq_mask.ena);
+	}
+
+	return mbox_process(mbox);
+}
+
+int
 roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 {
 	struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
-	struct mbox *mbox = (&nix->dev)->mbox;
 	int rc;
 
-	/* Pkts will be dropped silently if RQ is disabled */
-	if (roc_model_is_cn9k()) {
-		struct nix_aq_enq_req *aq;
-
-		aq = mbox_alloc_msg_nix_aq_enq(mbox);
-		aq->qidx = rq->qid;
-		aq->ctype = NIX_AQ_CTYPE_RQ;
-		aq->op = NIX_AQ_INSTOP_WRITE;
-
-		aq->rq.ena = enable;
-		aq->rq_mask.ena = ~(aq->rq_mask.ena);
-	} else {
-		struct nix_cn10k_aq_enq_req *aq;
-
-		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
-		aq->qidx = rq->qid;
-		aq->ctype = NIX_AQ_CTYPE_RQ;
-		aq->op = NIX_AQ_INSTOP_WRITE;
-
-		aq->rq.ena = enable;
-		aq->rq_mask.ena = ~(aq->rq_mask.ena);
-	}
-
-	rc = mbox_process(mbox);
+	rc = nix_rq_ena_dis(&nix->dev, rq, enable);
 
 	if (roc_model_is_cn10k())
 		plt_write64(rq->qid, nix->base + NIX_LF_OP_VWQE_FLUSH);
 	return rc;
 }
 
-static int
-rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
+		bool cfg, bool ena)
 {
-	struct mbox *mbox = (&nix->dev)->mbox;
+	struct mbox *mbox = dev->mbox;
 	struct nix_aq_enq_req *aq;
 
 	aq = mbox_alloc_msg_nix_aq_enq(mbox);
@@ -118,7 +126,7 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
-	aq->rq.qint_idx = rq->qid % nix->qints;
+	aq->rq.qint_idx = rq->qid % qints;
 	aq->rq.xqe_drop_ena = 1;
 
 	/* If RED enabled, then fill enable for all cases */
@@ -179,11 +187,12 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	return 0;
 }
 
-static int
-rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
+int
+nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
+	   bool ena)
 {
-	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_cn10k_aq_enq_req *aq;
+	struct mbox *mbox = dev->mbox;
 
 	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
 	aq->qidx = rq->qid;
@@ -220,8 +229,10 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq.cq = rq->qid;
 	}
 
-	if (rq->ipsech_ena)
+	if (rq->ipsech_ena) {
 		aq->rq.ipsech_ena = 1;
+		aq->rq.ipsecd_drop_en = 1;
+	}
 
 	aq->rq.lpb_aura = roc_npa_aura_handle_to_aura(rq->aura_handle);
 
@@ -260,7 +271,7 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
 	aq->rq.rq_int_ena = 0;
 	/* Many to one reduction */
-	aq->rq.qint_idx = rq->qid % nix->qints;
+	aq->rq.qint_idx = rq->qid % qints;
 	aq->rq.xqe_drop_ena = 1;
 
 	/* If RED enabled, then fill enable for all cases */
@@ -359,6 +370,7 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct mbox *mbox = (&nix->dev)->mbox;
 	bool is_cn9k = roc_model_is_cn9k();
+	struct dev *dev = &nix->dev;
 	int rc;
 
 	if (roc_nix == NULL || rq == NULL)
@@ -370,9 +382,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
-		rc = rq_cn9k_cfg(nix, rq, false, ena);
+		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
 	else
-		rc = rq_cfg(nix, rq, false, ena);
+		rc = nix_rq_cfg(dev, rq, nix->qints, false, ena);
 
 	if (rc)
 		return rc;
@@ -386,6 +398,7 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
 	struct mbox *mbox = (&nix->dev)->mbox;
 	bool is_cn9k = roc_model_is_cn9k();
+	struct dev *dev = &nix->dev;
 	int rc;
 
 	if (roc_nix == NULL || rq == NULL)
@@ -397,9 +410,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
-		rc = rq_cn9k_cfg(nix, rq, true, ena);
+		rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, true, ena);
 	else
-		rc = rq_cfg(nix, rq, true, ena);
+		rc = nix_rq_cfg(dev, rq, nix->qints, true, ena);
 
 	if (rc)
 		return rc;
-- 
2.8.4
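
As a side note on the queue API change: the RQ helpers now operate on a
struct dev pointer and take the qint count explicitly, so an internal
caller can drive them without a full roc_nix handle. A rough sketch under
that assumption follows (the function name is hypothetical; the cfg
helpers only queue the AQ mailbox request, so the caller still pushes it
with mbox_process(), which is why roc_nix_rq_init()/roc_nix_rq_modify()
keep a local mbox pointer):

/*
 * Hedged sketch: an internal RQ init path built only from the helpers
 * this patch exports via roc_nix_priv.h. dev, rq and qints are assumed
 * to be set up by the caller.
 */
static int
inline_dev_rq_init(struct dev *dev, struct roc_nix_rq *rq,
		   uint16_t qints, bool ena)
{
	int rc;

	/* Queue the AQ request; cfg == false selects the init path,
	 * as in roc_nix_rq_init().
	 */
	if (roc_model_is_cn9k())
		rc = nix_rq_cn9k_cfg(dev, rq, qints, false, ena);
	else
		rc = nix_rq_cfg(dev, rq, qints, false, ena);
	if (rc)
		return rc;

	/* The cfg helpers only queue the mailbox message; pushing it to
	 * the AF remains the caller's responsibility.
	 */
	return mbox_process(dev->mbox);
}

The same RQ can later be disabled with nix_rq_ena_dis(dev, rq, false),
again without going through the roc_nix handle.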



Thread overview: 91+ messages
2021-09-02  2:14 [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 01/27] common/cnxk: add security support for cn9k fast path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 02/27] common/cnxk: add helper API to dump cpt parse header Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 03/27] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 04/27] common/cnxk: change nix debug API and queue API interface Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 05/27] common/cnxk: add nix inline device irq API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 06/27] common/cnxk: add nix inline device init and fini Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 07/27] common/cnxk: add nix inline inbound and outbound support API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 08/27] common/cnxk: dump cpt lf registers on error intr Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 09/27] common/cnxk: align cpt lf enable/disable sequence Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 10/27] common/cnxk: restore nix sqb pool limit before destroy Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 11/27] common/cnxk: add cq enable support in nix Tx path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 12/27] common/cnxk: setup aura bp conf based on nix Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 13/27] common/cnxk: add anti-replay check implementation for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 14/27] common/cnxk: add inline IPsec support in rte flow Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 15/27] net/cnxk: add inline security support for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for cn10k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 17/27] net/cnxk: add cn9k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 18/27] net/cnxk: add cn9k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 19/27] net/cnxk: add cn10k Rx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 20/27] net/cnxk: add cn10k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 21/27] net/cnxk: add cn9k anti replay " Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 22/27] net/cnxk: add cn10k IPsec transport mode support Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 23/27] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 24/27] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 25/27] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 26/27] net/cnxk: add devargs for configuring channel mask Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 27/27] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-09-29 12:44 ` [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Jerin Jacob
2021-09-30 17:00 ` [dpdk-dev] [PATCH v2 00/28] " Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` Nithin Dabilpuram [this message]
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob

