From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, Sachin Saxena <sachin.saxena@nxp.com>
Subject: [v2 19/30] dma/dpaa: data path optimization
Date: Mon, 22 Jul 2024 17:28:32 +0530
Message-ID: <20240722115843.1830105-20-g.singh@nxp.com>
In-Reply-To: <20240722115843.1830105-1-g.singh@nxp.com>

Remove the unnecessary status read before every send, and replace the
read-modify-write doorbell updates in the data path with single writes
of precomputed register values.
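
For reference, below is a minimal, self-contained sketch of the doorbell
change in the enqueue/submit fast path: the read-modify-write of BCQMR on
every send is replaced by a single write of a precomputed value. The MMIO
helpers and the FSL_QDMA_BCQMR_EI_BE bit value here are simplified
stand-ins for illustration only; FSL_QDMA_BCQMR_EI (0x20c0) is the value
introduced by this patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the block's BCQMR register image; the real driver goes
 * through big-endian MMIO accessors over the queue's register block.
 */
static volatile uint32_t bcqmr;

static uint32_t qdma_readl_be(volatile uint32_t *reg) { return *reg; }
static void qdma_writel_be(uint32_t val, volatile uint32_t *reg) { *reg = val; }

#define FSL_QDMA_BCQMR_EI_BE	0x20000000u	/* illustrative bit only */
#define FSL_QDMA_BCQMR_EI	0x20c0u		/* precomputed value from this patch */

int main(void)
{
	/* Old fast path: read the register, OR in the enqueue-increment
	 * bit, write it back (one extra MMIO read per send).
	 */
	uint32_t reg = qdma_readl_be(&bcqmr);
	reg |= FSL_QDMA_BCQMR_EI_BE;
	qdma_writel_be(reg, &bcqmr);

	/* New fast path: a single write of the precomputed register
	 * value, no read needed.
	 */
	qdma_writel_be(FSL_QDMA_BCQMR_EI, &bcqmr);

	printf("BCQMR image: 0x%08" PRIx32 "\n", bcqmr);
	return 0;
}

The completion path gets the same treatment (FSL_QDMA_BSQMR_DI replaces
the BSQMR read-modify-write), and the header comment notes that
FSL_QDMA_BCQMR_EI must be updated whenever QDMA_QUEUE_SIZE changes.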

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 186 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 ++
 2 files changed, 101 insertions(+), 92 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8e8426b88d..4022ad6469 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -248,7 +248,8 @@ fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 }
 
 static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+					   u32 id)
 {
 	struct fsl_qdma_queue *status_head;
 	unsigned int status_size;
@@ -277,6 +278,8 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	       sizeof(struct fsl_qdma_format));
 	status_head->n_cq = status_size;
 	status_head->virt_head = status_head->cq;
+	status_head->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	return status_head;
 }
@@ -334,12 +337,9 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
+fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_format *status_addr;
 	u32 reg;
 	int count = 0;
 
@@ -348,16 +348,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
 			return count;
 
-		status_addr = fsl_status->virt_head;
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -472,19 +463,37 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 {
 	void *block = fsl_queue->queue_base;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
-	u32 reg;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 	u32 cfg = 0;
 #endif
 
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	u32 reg;
+
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
+	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
+		DPAA_QDMA_ERR("QDMA Engine is busy\n");
 		return -1;
+	}
+#else
+	/* Check whether the critical watermark level has been reached;
+	 * this check is valid only for a single queue per block.
+	 */
+	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
+			>= QDMA_QUEUE_CR_WM) {
+		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+		return -1;
+	}
+#endif
+	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
+		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
+		return -1;
+	}
 
 	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
 		   QDMA_SGF_SRC_OFF;
@@ -512,19 +521,14 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 	qdma_csgf_set_len(csgf_dest, len);
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->virt_head++;
 	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
-		fsl_queue->virt_head = fsl_queue->cq;
+	if (fsl_queue->ci == fsl_queue->n_cq)
 		fsl_queue->ci = 0;
-	}
-
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI,
+			       block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
@@ -618,12 +622,9 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	void *block = fsl_queue->queue_base;
-	u32 reg;
 
 	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -656,44 +657,43 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			 enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *status = fsl_qdma->status_base;
+	int intr;
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, st);
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+#endif
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			fsl_queue->stats.errors++;
+		}
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						st);
-	fsl_queue->stats.completed += intr;
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
-	return intr;
+	return ret;
 }
 
 
@@ -703,44 +703,46 @@ dpaa_qdma_dequeue(void *dev_private,
 		  uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	void *status = fsl_qdma->status_base;
+	int intr;
+#endif
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
+	*has_error = false;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, NULL);
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			intr = qdma_readl(status + FSL_QDMA_DEDR);
+			*has_error = true;
+			fsl_queue->stats.errors++;
+		}
 	}
-
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						NULL);
-	fsl_queue->stats.completed += intr;
+#endif
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
-	return intr;
+	return ret;
 }
 
 static int
@@ -842,7 +844,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	}
 
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
 		if (!fsl_qdma->status[i])
 			goto mem_free;
 		j = 0;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 80366ce890..8a4517a70a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -58,11 +58,17 @@
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
+/* Update the value appropriately whenever QDMA_QUEUE_SIZE
+ * changes.
+ */
+#define FSL_QDMA_BCQMR_EI		0x20c0
+
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
 #define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -110,6 +116,7 @@
 #define QDMA_SGF_SRC_OFF		2
 #define QDMA_SGF_DST_OFF		3
 #define QDMA_DESC_OFF			1
+#define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
-- 
2.25.1

