DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>
Cc: <dev@dpdk.org>
Subject: [PATCH v4 28/28] common/cnxk: add support for per-port RQ in inline device
Date: Sun, 8 May 2022 11:56:16 +0530	[thread overview]
Message-ID: <20220508062616.3398-28-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20220508062616.3398-1-ndabilpuram@marvell.com>

Add support for a per-port RQ in the inline device, using the Aura/Pool
attributes from that port's first RQ. When the inline device is used with
channel masking, it falls back to a single RQ for all ethdev ports.

Also remove the clamping up of CQ size for LBK ethdevs when inline inbound
is enabled, as backpressure is now supported even on LBK ethdevs.
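
For context, the per-port RQ selection added here reduces to the ternary
used in roc_nix_inl_dev_rq_get()/roc_nix_inl_dev_rq(). A minimal sketch of
that mapping (the helper name below is hypothetical and not part of the
driver):

	/* Hypothetical helper: map an ethdev port to its inline device RQ. */
	static inline uint16_t
	nix_inl_rq_id_for_port(const struct nix_inl_dev *inl_dev,
			       uint16_t port_id)
	{
		/* With channel masking the inline device keeps a single RQ
		 * (nb_rqs == 1), so every port shares RQ 0; otherwise each
		 * port gets its own RQ indexed by its port id.
		 */
		return inl_dev->nb_rqs > 1 ? port_id : 0;
	}

The fallback preserves the earlier single-RQ behaviour for channel-masked
configurations.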

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.h             |   2 +-
 drivers/common/cnxk/roc_nix_debug.c       |   7 +-
 drivers/common/cnxk/roc_nix_inl.c         |  81 ++++++++--------
 drivers/common/cnxk/roc_nix_inl.h         |   5 +-
 drivers/common/cnxk/roc_nix_inl_dev.c     |  42 ++++++--
 drivers/common/cnxk/roc_nix_inl_dev_irq.c | 155 +++++++++++++++++++-----------
 drivers/common/cnxk/roc_nix_inl_priv.h    |  12 ++-
 drivers/common/cnxk/roc_npc.c             |  13 ++-
 drivers/common/cnxk/version.map           |   1 -
 drivers/net/cnxk/cnxk_ethdev.c            |  14 +--
 10 files changed, 202 insertions(+), 130 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 1019e37..1c38af0 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -309,7 +309,7 @@ struct roc_nix_rq {
 	bool spb_drop_ena;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
-	bool inl_dev_ref;
+	uint16_t inl_dev_refs;
 };
 
 struct roc_nix_cq {
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 1ae0451..e05e60d 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -826,7 +826,7 @@ roc_nix_rq_dump(struct roc_nix_rq *rq)
 	nix_dump("  vwqe_wait_tmo = %ld", rq->vwqe_wait_tmo);
 	nix_dump("  vwqe_aura_handle = %ld", rq->vwqe_aura_handle);
 	nix_dump("  roc_nix = %p", rq->roc_nix);
-	nix_dump("  inl_dev_ref = %d", rq->inl_dev_ref);
+	nix_dump("  inl_dev_refs = %d", rq->inl_dev_refs);
 }
 
 void
@@ -1243,6 +1243,7 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
 	struct nix_inl_dev *inl_dev =
 		(struct nix_inl_dev *)&roc_inl_dev->reserved;
 	struct dev *dev = &inl_dev->dev;
+	int i;
 
 	nix_dump("nix_inl_dev@%p", inl_dev);
 	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
@@ -1259,7 +1260,6 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
 	nix_dump("  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
 	nix_dump("  \tnix_cints = %d", inl_dev->cints);
 	nix_dump("  \tnix_qints = %d", inl_dev->qints);
-	nix_dump("  \trq_refs = %d", inl_dev->rq_refs);
 	nix_dump("  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
 	nix_dump("  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
 	nix_dump("  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
@@ -1269,5 +1269,6 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
 	nix_dump("  \txaq_mem = 0x%p", inl_dev->xaq.mem);
 
 	nix_dump("  \tinl_dev_rq:");
-	roc_nix_rq_dump(&inl_dev->rq);
+	for (i = 0; i < inl_dev->nb_rqs; i++)
+		roc_nix_rq_dump(&inl_dev->rqs[i]);
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 05c663d..28d01b0 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -585,8 +585,10 @@ int
 roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 {
 	struct idev_cfg *idev = idev_get_cfg();
+	int port_id = rq->roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
 	struct roc_nix_rq *inl_rq;
+	uint16_t inl_rq_id;
 	struct dev *dev;
 	int rc;
 
@@ -598,19 +600,24 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 	if (!inl_dev)
 		return 0;
 
+	/* Check if this RQ already holds a reference */
+	if (rq->inl_dev_refs)
+		return 0;
+
+	inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+	dev = &inl_dev->dev;
+	inl_rq = &inl_dev->rqs[inl_rq_id];
+
 	/* Just take reference if already inited */
-	if (inl_dev->rq_refs) {
-		inl_dev->rq_refs++;
-		rq->inl_dev_ref = true;
+	if (inl_rq->inl_dev_refs) {
+		inl_rq->inl_dev_refs++;
+		rq->inl_dev_refs = 1;
 		return 0;
 	}
-
-	dev = &inl_dev->dev;
-	inl_rq = &inl_dev->rq;
 	memset(inl_rq, 0, sizeof(struct roc_nix_rq));
 
 	/* Take RQ pool attributes from the first ethdev RQ */
-	inl_rq->qid = 0;
+	inl_rq->qid = inl_rq_id;
 	inl_rq->aura_handle = rq->aura_handle;
 	inl_rq->first_skip = rq->first_skip;
 	inl_rq->later_skip = rq->later_skip;
@@ -688,8 +695,8 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq)
 		return rc;
 	}
 
-	inl_dev->rq_refs++;
-	rq->inl_dev_ref = true;
+	inl_rq->inl_dev_refs++;
+	rq->inl_dev_refs = 1;
 	return 0;
 }
 
@@ -697,15 +704,17 @@ int
 roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 {
 	struct idev_cfg *idev = idev_get_cfg();
+	int port_id = rq->roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
 	struct roc_nix_rq *inl_rq;
+	uint16_t inl_rq_id;
 	struct dev *dev;
 	int rc;
 
 	if (idev == NULL)
 		return 0;
 
-	if (!rq->inl_dev_ref)
+	if (!rq->inl_dev_refs)
 		return 0;
 
 	inl_dev = idev->nix_inl_dev;
@@ -715,13 +724,15 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 		return -EFAULT;
 	}
 
-	rq->inl_dev_ref = false;
-	inl_dev->rq_refs--;
-	if (inl_dev->rq_refs)
-		return 0;
-
 	dev = &inl_dev->dev;
-	inl_rq = &inl_dev->rq;
+	inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+	inl_rq = &inl_dev->rqs[inl_rq_id];
+
+	rq->inl_dev_refs = 0;
+	inl_rq->inl_dev_refs--;
+	if (inl_rq->inl_dev_refs)
+		return 0;
+
 	/* There are no more references, disable RQ */
 	rc = nix_rq_ena_dis(dev, inl_rq, false);
 	if (rc)
@@ -737,25 +748,6 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
 	return rc;
 }
 
-uint64_t
-roc_nix_inl_dev_rq_limit_get(void)
-{
-	struct idev_cfg *idev = idev_get_cfg();
-	struct nix_inl_dev *inl_dev;
-	struct roc_nix_rq *inl_rq;
-
-	if (!idev || !idev->nix_inl_dev)
-		return 0;
-
-	inl_dev = idev->nix_inl_dev;
-	if (!inl_dev->rq_refs)
-		return 0;
-
-	inl_rq = &inl_dev->rq;
-
-	return roc_npa_aura_op_limit_get(inl_rq->aura_handle);
-}
-
 void
 roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
 {
@@ -804,15 +796,22 @@ roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix)
 }
 
 struct roc_nix_rq *
-roc_nix_inl_dev_rq(void)
+roc_nix_inl_dev_rq(struct roc_nix *roc_nix)
 {
 	struct idev_cfg *idev = idev_get_cfg();
+	int port_id = roc_nix->port_id;
 	struct nix_inl_dev *inl_dev;
+	struct roc_nix_rq *inl_rq;
+	uint16_t inl_rq_id;
 
 	if (idev != NULL) {
 		inl_dev = idev->nix_inl_dev;
-		if (inl_dev != NULL && inl_dev->rq_refs)
-			return &inl_dev->rq;
+		if (inl_dev != NULL) {
+			inl_rq_id = inl_dev->nb_rqs > 1 ? port_id : 0;
+			inl_rq = &inl_dev->rqs[inl_rq_id];
+			if (inl_rq->inl_dev_refs)
+				return inl_rq;
+		}
 	}
 
 	return NULL;
@@ -1022,6 +1021,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
 	void *sa, *sa_base = NULL;
 	struct nix *nix = NULL;
 	uint16_t max_spi = 0;
+	uint32_t rq_refs = 0;
 	uint8_t pkind = 0;
 	int i;
 
@@ -1044,7 +1044,10 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
 	}
 
 	if (inl_dev) {
-		if (inl_dev->rq_refs == 0) {
+		for (i = 0; i < inl_dev->nb_rqs; i++)
+			rq_refs += inl_dev->rqs[i].inl_dev_refs;
+
+		if (rq_refs == 0) {
 			inl_dev->ts_ena = ts_ena;
 			max_spi = inl_dev->ipsec_in_max_spi;
 			sa_base = inl_dev->inb_sa_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 633f090..7835ba3 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -168,12 +168,11 @@ void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
 int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq);
 int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
 bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
-struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(void);
+struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
 int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix,
 					 uint32_t tag_const, uint8_t tt);
-uint64_t __roc_api roc_nix_inl_dev_rq_limit_get(void);
 int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
-					uint16_t max_frags);
+					   uint16_t max_frags);
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
 				       bool inb_inl_dev);
 
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 5e61a42..b304937 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -334,6 +334,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 	struct nix_lf_alloc_rsp *rsp;
 	struct nix_lf_alloc_req *req;
 	struct nix_hw_info *hw_info;
+	struct roc_nix_rq *rqs;
 	uint64_t max_sa, i;
 	size_t inb_sa_sz;
 	int rc = -ENOSPC;
@@ -345,7 +346,8 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 	req = mbox_alloc_msg_nix_lf_alloc(mbox);
 	if (req == NULL)
 		return rc;
-	req->rq_cnt = 1;
+	/* We will have a per-port RQ if channel masking is not in use */
+	req->rq_cnt = inl_dev->nb_rqs;
 	req->sq_cnt = 1;
 	req->cq_cnt = 1;
 	/* XQESZ is W16 */
@@ -421,6 +423,14 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
 		goto free_mem;
 	}
 
+	/* Allocate memory for RQs */
+	rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
+	if (!rqs) {
+		plt_err("Failed to allocate memory for RQs");
+		goto free_mem;
+	}
+	inl_dev->rqs = rqs;
+
 	return 0;
 free_mem:
 	plt_free(inl_dev->inb_sa_base);
@@ -464,7 +474,15 @@ nix_inl_nix_release(struct nix_inl_dev *inl_dev)
 	if (req == NULL)
 		return -ENOSPC;
 
-	return mbox_process(mbox);
+	rc = mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	plt_free(inl_dev->rqs);
+	plt_free(inl_dev->inb_sa_base);
+	inl_dev->rqs = NULL;
+	inl_dev->inb_sa_base = NULL;
+	return 0;
 }
 
 static int
@@ -584,10 +602,13 @@ roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
 
 no_pool:
 	/* Disable RQ if enabled */
-	if (inl_dev->rq_refs) {
-		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, false);
+	for (i = 0; i < inl_dev->nb_rqs; i++) {
+		if (!inl_dev->rqs[i].inl_dev_refs)
+			continue;
+		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], false);
 		if (rc) {
-			plt_err("Failed to disable inline dev RQ, rc=%d", rc);
+			plt_err("Failed to disable inline dev RQ %d, rc=%d", i,
+				rc);
 			return rc;
 		}
 	}
@@ -633,10 +654,14 @@ roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle)
 
 exit:
 	/* Renable RQ */
-	if (inl_dev->rq_refs) {
-		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rq, true);
+	for (i = 0; i < inl_dev->nb_rqs; i++) {
+		if (!inl_dev->rqs[i].inl_dev_refs)
+			continue;
+
+		rc = nix_rq_ena_dis(&inl_dev->dev, &inl_dev->rqs[i], true);
 		if (rc)
-			plt_err("Failed to enable inline dev RQ, rc=%d", rc);
+			plt_err("Failed to enable inline dev RQ %d, rc=%d", i,
+				rc);
 	}
 
 	return rc;
@@ -815,6 +840,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
 	inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
 	inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
+	inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
 
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
diff --git a/drivers/common/cnxk/roc_nix_inl_dev_irq.c b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
index 1855f36..5c19bc3 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev_irq.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev_irq.c
@@ -179,50 +179,59 @@ nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev)
 static void
 nix_inl_nix_q_irq(void *param)
 {
-	struct nix_inl_dev *inl_dev = (struct nix_inl_dev *)param;
+	struct nix_inl_qint *qints_mem = (struct nix_inl_qint *)param;
+	struct nix_inl_dev *inl_dev = qints_mem->inl_dev;
 	uintptr_t nix_base = inl_dev->nix_base;
 	struct dev *dev = &inl_dev->dev;
+	uint16_t qint = qints_mem->qint;
 	volatile void *ctx;
 	uint64_t reg, intr;
+	uint64_t wdata;
 	uint8_t irq;
-	int rc;
+	int rc, q;
 
-	intr = plt_read64(nix_base + NIX_LF_QINTX_INT(0));
+	intr = plt_read64(nix_base + NIX_LF_QINTX_INT(qint));
 	if (intr == 0)
 		return;
 
 	plt_err("Queue_intr=0x%" PRIx64 " qintx 0 pf=%d, vf=%d", intr, dev->pf,
 		dev->vf);
 
-	/* Get and clear RQ0 interrupt */
-	reg = roc_atomic64_add_nosync(0,
-				      (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
-	if (reg & BIT_ULL(42) /* OP_ERR */) {
-		plt_err("Failed to get rq_int");
-		return;
+	/* Handle RQ interrupts */
+	for (q = 0; q < inl_dev->nb_rqs; q++) {
+		/* Get and clear RQ interrupts */
+		wdata = (uint64_t)q << 44;
+		reg = roc_atomic64_add_nosync(wdata,
+					      (int64_t *)(nix_base + NIX_LF_RQ_OP_INT));
+		if (reg & BIT_ULL(42) /* OP_ERR */) {
+			plt_err("Failed to get rq_int");
+			return;
+		}
+		irq = reg & 0xff;
+		plt_write64(wdata | irq, nix_base + NIX_LF_RQ_OP_INT);
+
+		if (irq & BIT_ULL(NIX_RQINT_DROP))
+			plt_err("RQ=%d NIX_RQINT_DROP", q);
+
+		if (irq & BIT_ULL(NIX_RQINT_RED))
+			plt_err("RQ=%d NIX_RQINT_RED", q);
 	}
-	irq = reg & 0xff;
-	plt_write64(0 | irq, nix_base + NIX_LF_RQ_OP_INT);
-
-	if (irq & BIT_ULL(NIX_RQINT_DROP))
-		plt_err("RQ=0 NIX_RQINT_DROP");
-
-	if (irq & BIT_ULL(NIX_RQINT_RED))
-		plt_err("RQ=0 NIX_RQINT_RED");
 
 	/* Clear interrupt */
-	plt_write64(intr, nix_base + NIX_LF_QINTX_INT(0));
+	plt_write64(intr, nix_base + NIX_LF_QINTX_INT(qint));
 
 	/* Dump registers to std out */
 	nix_inl_nix_reg_dump(inl_dev);
 
-	/* Dump RQ 0 */
-	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
-	if (rc) {
-		plt_err("Failed to get rq context");
-		return;
+	/* Dump RQs */
+	for (q = 0; q < inl_dev->nb_rqs; q++) {
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+		if (rc) {
+			plt_err("Failed to get rq %d context, rc=%d", q, rc);
+			continue;
+		}
+		nix_lf_rq_dump(ctx);
 	}
-	nix_lf_rq_dump(ctx);
 }
 
 static void
@@ -233,7 +242,7 @@ nix_inl_nix_ras_irq(void *param)
 	struct dev *dev = &inl_dev->dev;
 	volatile void *ctx;
 	uint64_t intr;
-	int rc;
+	int rc, q;
 
 	intr = plt_read64(nix_base + NIX_LF_RAS);
 	if (intr == 0)
@@ -246,13 +255,15 @@ nix_inl_nix_ras_irq(void *param)
 	/* Dump registers to std out */
 	nix_inl_nix_reg_dump(inl_dev);
 
-	/* Dump RQ 0 */
-	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
-	if (rc) {
-		plt_err("Failed to get rq context");
-		return;
+	/* Dump RQs */
+	for (q = 0; q < inl_dev->nb_rqs; q++) {
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+		if (rc) {
+			plt_err("Failed to get rq %d context, rc=%d", q, rc);
+			continue;
+		}
+		nix_lf_rq_dump(ctx);
 	}
-	nix_lf_rq_dump(ctx);
 }
 
 static void
@@ -263,7 +274,7 @@ nix_inl_nix_err_irq(void *param)
 	struct dev *dev = &inl_dev->dev;
 	volatile void *ctx;
 	uint64_t intr;
-	int rc;
+	int rc, q;
 
 	intr = plt_read64(nix_base + NIX_LF_ERR_INT);
 	if (intr == 0)
@@ -277,13 +288,15 @@ nix_inl_nix_err_irq(void *param)
 	/* Dump registers to std out */
 	nix_inl_nix_reg_dump(inl_dev);
 
-	/* Dump RQ 0 */
-	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, 0, &ctx);
-	if (rc) {
-		plt_err("Failed to get rq context");
-		return;
+	/* Dump RQs */
+	for (q = 0; q < inl_dev->nb_rqs; q++) {
+		rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_RQ, q, &ctx);
+		if (rc) {
+			plt_err("Failed to get rq %d context, rc=%d", q, rc);
+			continue;
+		}
+		nix_lf_rq_dump(ctx);
 	}
-	nix_lf_rq_dump(ctx);
 }
 
 int
@@ -291,8 +304,10 @@ nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
 {
 	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
 	uintptr_t nix_base = inl_dev->nix_base;
+	struct nix_inl_qint *qints_mem;
+	int rc, q, ret = 0;
 	uint16_t msixoff;
-	int rc;
+	int qints;
 
 	msixoff = inl_dev->nix_msixoff;
 	if (msixoff == MSIX_VECTOR_INVALID) {
@@ -317,21 +332,38 @@ nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev)
 	/* Enable RAS interrupts */
 	plt_write64(~0ull, nix_base + NIX_LF_RAS_ENA_W1S);
 
-	/* Setup queue irq for RQ 0 */
+	/* Setup queue irq for RQs */
+	qints = PLT_MIN(inl_dev->nb_rqs, inl_dev->qints);
+	qints_mem = plt_zmalloc(sizeof(struct nix_inl_qint) * qints, 0);
+	if (!qints_mem) {
+		plt_err("Failed to allocate memory for %u qints", qints);
+		return -ENOMEM;
+	}
 
-	/* Clear QINT CNT, interrupt */
-	plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
-	plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
+	inl_dev->configured_qints = qints;
+	inl_dev->qints_mem = qints_mem;
 
-	/* Register queue irq vector */
-	rc |= dev_irq_register(handle, nix_inl_nix_q_irq, inl_dev,
-			       msixoff + NIX_LF_INT_VEC_QINT_START);
+	for (q = 0; q < qints; q++) {
+		/* Clear QINT CNT, interrupt */
+		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
 
-	plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
-	plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
-	/* Enable QINT interrupt */
-	plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(0));
+		/* Register queue irq vector */
+		ret = dev_irq_register(handle, nix_inl_nix_q_irq, &qints_mem[q],
+				       msixoff + NIX_LF_INT_VEC_QINT_START + q);
+		if (ret)
+			break;
 
+		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
+		/* Enable QINT interrupt */
+		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1S(q));
+
+		qints_mem[q].inl_dev = inl_dev;
+		qints_mem[q].qint = q;
+	}
+
+	rc |= ret;
 	return rc;
 }
 
@@ -339,8 +371,10 @@ void
 nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
 {
 	struct plt_intr_handle *handle = inl_dev->pci_dev->intr_handle;
+	struct nix_inl_qint *qints_mem = inl_dev->qints_mem;
 	uintptr_t nix_base = inl_dev->nix_base;
 	uint16_t msixoff;
+	int q;
 
 	msixoff = inl_dev->nix_msixoff;
 	/* Disable err interrupts */
@@ -353,14 +387,19 @@ nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev)
 	dev_irq_unregister(handle, nix_inl_nix_ras_irq, inl_dev,
 			   msixoff + NIX_LF_INT_VEC_POISON);
 
-	/* Clear QINT CNT */
-	plt_write64(0, nix_base + NIX_LF_QINTX_CNT(0));
-	plt_write64(0, nix_base + NIX_LF_QINTX_INT(0));
+	for (q = 0; q < inl_dev->configured_qints; q++) {
+		/* Clear QINT CNT */
+		plt_write64(0, nix_base + NIX_LF_QINTX_CNT(q));
+		plt_write64(0, nix_base + NIX_LF_QINTX_INT(q));
 
-	/* Disable QINT interrupt */
-	plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(0));
+		/* Disable QINT interrupt */
+		plt_write64(~0ull, nix_base + NIX_LF_QINTX_ENA_W1C(q));
 
-	/* Unregister queue irq vector */
-	dev_irq_unregister(handle, nix_inl_nix_q_irq, inl_dev,
-			   msixoff + NIX_LF_INT_VEC_QINT_START);
+		/* Unregister queue irq vector */
+		dev_irq_unregister(handle, nix_inl_nix_q_irq, &qints_mem[q],
+				   msixoff + NIX_LF_INT_VEC_QINT_START + q);
+	}
+
+	plt_free(inl_dev->qints_mem);
+	inl_dev->qints_mem = NULL;
 }
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 1ab8470..d61c7b2 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,6 +6,12 @@
 #include <pthread.h>
 #include <sys/types.h>
 
+struct nix_inl_dev;
+struct nix_inl_qint {
+	struct nix_inl_dev *inl_dev;
+	uint16_t qint;
+};
+
 struct nix_inl_dev {
 	/* Base device object */
 	struct dev dev;
@@ -42,8 +48,10 @@ struct nix_inl_dev {
 	uint16_t vwqe_interval;
 	uint16_t cints;
 	uint16_t qints;
-	struct roc_nix_rq rq;
-	uint16_t rq_refs;
+	uint16_t configured_qints;
+	struct roc_nix_rq *rqs;
+	struct nix_inl_qint *qints_mem;
+	uint16_t nb_rqs;
 	bool is_nix1;
 	uint8_t spb_drop_pc;
 	uint8_t lpb_drop_pc;
diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index 51e36f1..3dee1ff 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -307,6 +307,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
 	bool vlan_insert_action = false;
 	int sel_act, req_act = 0;
 	uint16_t pf_func, vf_id;
+	struct roc_nix *roc_nix;
 	int errcode = 0;
 	int mark = 0;
 	int rq = 0;
@@ -392,11 +393,19 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
 			 */
 			req_act |= ROC_NPC_ACTION_TYPE_SEC;
 			rq = 0;
+			roc_nix = roc_npc->roc_nix;
 
 			/* Special processing when with inline device */
-			if (roc_nix_inb_is_with_inl_dev(roc_npc->roc_nix) &&
+			if (roc_nix_inb_is_with_inl_dev(roc_nix) &&
 			    roc_nix_inl_dev_is_probed()) {
-				rq = 0;
+				struct roc_nix_rq *inl_rq;
+
+				inl_rq = roc_nix_inl_dev_rq(roc_nix);
+				if (!inl_rq) {
+					errcode = NPC_ERR_INTERNAL;
+					goto err_exit;
+				}
+				rq = inl_rq->qid;
 				pf_func = nix_inl_dev_pffunc_get();
 			}
 			break;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 53586da..a77f3f6 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -138,7 +138,6 @@ INTERNAL {
 	roc_nix_inl_dev_rq;
 	roc_nix_inl_dev_rq_get;
 	roc_nix_inl_dev_rq_put;
-	roc_nix_inl_dev_rq_limit_get;
 	roc_nix_inl_dev_unlock;
 	roc_nix_inl_dev_xaq_realloc;
 	roc_nix_inl_inb_is_enabled;
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3912c24..09e5736 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -546,19 +546,6 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		eth_dev->data->rx_queues[qid] = NULL;
 	}
 
-	/* Clam up cq limit to size of packet pool aura for LBK
-	 * to avoid meta packet drop as LBK does not currently support
-	 * backpressure.
-	 */
-	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
-		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
-
-		/* Use current RQ's aura limit if inl rq is not available */
-		if (!pkt_pool_limit)
-			pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
-		nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
-	}
-
 	/* Its a no-op when inline device is not used */
 	if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
 	    dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
@@ -1675,6 +1662,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 	/* Initialize base roc nix */
 	nix->pci_dev = pci_dev;
 	nix->hw_vlan_ins = true;
+	nix->port_id = eth_dev->data->port_id;
 	rc = roc_nix_dev_init(nix);
 	if (rc) {
 		plt_err("Failed to initialize roc nix rc=%d", rc);
-- 
2.8.4


Thread overview: 110+ messages
2022-04-19  5:58 [PATCH 01/24] common/cnxk: add multi channel support for SDP send queues Nithin Dabilpuram
2022-04-19  5:58 ` [PATCH 02/24] net/cnxk: add receive channel backpressure for SDP Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 03/24] common/cnxk: add new pkind for CPT when ts is enabled Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 04/24] common/cnxk: support to configure the ts pkind in CPT Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 05/24] common/cnxk: fix SQ flush sequence Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 06/24] common/cnxk: skip probing SoC environment for CN9k Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 07/24] common/cnxk: fix issues in soft expiry disable path Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 08/24] common/cnxk: convert warning to debug print Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 09/24] common/cnxk: use aggregate level rr prio from mbox Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 10/24] net/cnxk: support loopback mode on AF VF's Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 11/24] net/cnxk: update LBK ethdev link info Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 12/24] net/cnxk: add barrier after meta batch free in scalar Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 13/24] net/cnxk: disable default inner chksum for outb inline Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 14/24] net/cnxk: fix roundup size with transport mode Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 15/24] net/cnxk: update inline device in ethdev telemetry Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 16/24] net/cnxk: change env for debug IV Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 17/24] net/cnxk: reset offload flag if reassembly is disabled Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 18/24] net/cnxk: support decrement TTL for inline IPsec Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 19/24] net/cnxk: optimize Rx fast path for security pkts Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 20/24] net/cnxk: update olflags with L3/L4 csum offload Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 21/24] net/cnxk: add capabilities for IPsec crypto algos Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 22/24] net/cnxk: add capabilities for IPsec options Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 23/24] net/cnxk: support security stats Nithin Dabilpuram
2022-04-19  5:59 ` [PATCH 24/24] net/cnxk: add support for flow control for outbound inline Nithin Dabilpuram
2022-05-05 12:55 ` [PATCH v3 01/28] common/cnxk: add multi channel support for SDP send queues Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 02/28] net/cnxk: add receive channel backpressure for SDP Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 03/28] common/cnxk: add new pkind for CPT when ts is enabled Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 04/28] common/cnxk: support to configure the ts pkind in CPT Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 05/28] common/cnxk: fix SQ flush sequence Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 06/28] common/cnxk: skip probing SoC environment for CN9k Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 07/28] common/cnxk: fix issues in soft expiry disable path Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 08/28] common/cnxk: convert warning to debug print Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 09/28] common/cnxk: use aggregate level rr prio from mbox Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 10/28] net/cnxk: support loopback mode on AF VF's Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 11/28] net/cnxk: update LBK ethdev link info Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 12/28] net/cnxk: add barrier after meta batch free in scalar Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 13/28] net/cnxk: disable default inner chksum for outb inline Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 14/28] net/cnxk: fix roundup size with transport mode Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 15/28] net/cnxk: update inline device in ethdev telemetry Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 16/28] net/cnxk: change env for debug IV Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 17/28] net/cnxk: reset offload flag if reassembly is disabled Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 18/28] net/cnxk: support decrement TTL for inline IPsec Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 19/28] net/cnxk: optimize Rx fast path for security pkts Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 20/28] net/cnxk: update olflags with L3/L4 csum offload Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 21/28] net/cnxk: add capabilities for IPsec crypto algos Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 22/28] net/cnxk: add capabilities for IPsec options Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 23/28] net/cnxk: support security stats Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 24/28] net/cnxk: add support for flow control for outbound inline Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 25/28] net/cnxk: perform early MTU setup for eventmode Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 26/28] net/cnxk: fix multi-seg extraction in vwqe path Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 27/28] net/cnxk: fix hotplug detach sequence for first device Nithin Dabilpuram
2022-05-05 12:55   ` [PATCH v3 28/28] common/cnxk: add support for per-port RQ in inline device Nithin Dabilpuram
2022-05-07 10:20     ` Jerin Jacob
2022-05-08  6:25 ` [PATCH v4 01/28] common/cnxk: add multi channel support for SDP send queues Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 02/28] net/cnxk: add receive channel backpressure for SDP Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 03/28] common/cnxk: add new pkind for CPT when ts is enabled Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 04/28] common/cnxk: support to configure the ts pkind in CPT Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 05/28] common/cnxk: fix SQ flush sequence Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 06/28] common/cnxk: skip probing SoC environment for CN9k Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 07/28] common/cnxk: fix issues in soft expiry disable path Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 08/28] common/cnxk: convert warning to debug print Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 09/28] common/cnxk: use aggregate level rr prio from mbox Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 10/28] net/cnxk: support loopback mode on AF VF's Nithin Dabilpuram
2022-05-08  6:25   ` [PATCH v4 11/28] net/cnxk: update LBK ethdev link info Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 12/28] net/cnxk: add barrier after meta batch free in scalar Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 13/28] net/cnxk: disable default inner chksum for outb inline Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 14/28] net/cnxk: fix roundup size with transport mode Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 15/28] net/cnxk: update inline device in ethdev telemetry Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 16/28] net/cnxk: change env for debug IV Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 17/28] net/cnxk: reset offload flag if reassembly is disabled Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 18/28] net/cnxk: support decrement TTL for inline IPsec Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 19/28] net/cnxk: optimize Rx fast path for security pkts Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 20/28] net/cnxk: update olflags with L3/L4 csum offload Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 21/28] net/cnxk: add capabilities for IPsec crypto algos Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 22/28] net/cnxk: add capabilities for IPsec options Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 23/28] net/cnxk: support security stats Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 24/28] net/cnxk: add support for flow control for outbound inline Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 25/28] net/cnxk: perform early MTU setup for eventmode Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 26/28] net/cnxk: fix multi-seg extraction in vwqe path Nithin Dabilpuram
2022-05-08  6:26   ` [PATCH v4 27/28] net/cnxk: fix hotplug detach sequence for first device Nithin Dabilpuram
2022-05-08  6:26   ` Nithin Dabilpuram [this message]
2022-05-08  7:48 ` [PATCH v5 01/28] common/cnxk: add multi channel support for SDP send queues Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 02/28] net/cnxk: add receive channel backpressure for SDP Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 03/28] common/cnxk: add new pkind for CPT when ts is enabled Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 04/28] common/cnxk: support to configure the ts pkind in CPT Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 05/28] common/cnxk: fix SQ flush sequence Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 06/28] common/cnxk: skip probing SoC environment for CN9k Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 07/28] common/cnxk: fix issues in soft expiry disable path Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 08/28] common/cnxk: convert warning to debug print Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 09/28] common/cnxk: use aggregate level rr prio from mbox Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 10/28] net/cnxk: support loopback mode on AF VF's Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 11/28] net/cnxk: update LBK ethdev link info Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 12/28] net/cnxk: add barrier after meta batch free in scalar Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 13/28] net/cnxk: disable default inner chksum for outb inline Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 14/28] net/cnxk: fix roundup size with transport mode Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 15/28] net/cnxk: update inline device in ethdev telemetry Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 16/28] net/cnxk: change env for debug IV Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 17/28] net/cnxk: reset offload flag if reassembly is disabled Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 18/28] net/cnxk: support decrement TTL for inline IPsec Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 19/28] net/cnxk: optimize Rx fast path for security pkts Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 20/28] net/cnxk: update olflags with L3/L4 csum offload Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 21/28] net/cnxk: add capabilities for IPsec crypto algos Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 22/28] net/cnxk: add capabilities for IPsec options Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 23/28] net/cnxk: support security stats Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 24/28] net/cnxk: add support for flow control for outbound inline Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 25/28] net/cnxk: perform early MTU setup for eventmode Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 26/28] net/cnxk: fix multi-seg extraction in vwqe path Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 27/28] net/cnxk: fix hotplug detach sequence for first device Nithin Dabilpuram
2022-05-08  7:48   ` [PATCH v5 28/28] common/cnxk: add support for per-port RQ in inline device Nithin Dabilpuram
2022-05-10 14:31     ` Jerin Jacob
