From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution
Date: Fri, 19 Jul 2024 15:31:22 +0530
Message-ID: <20240719100126.1150373-26-g.singh@nxp.com>
In-Reply-To: <20240719100126.1150373-1-g.singh@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

Fix the ERR050757/ERR050265 errata workaround, which is not effective
in burst mode.

The SDF/DDF is referenced by the first entry of the compound frame
table, so move the DF into the compound frame table descriptor; this
suits both the single-copy and the SG/burst-copy paths.

Also fix an SG issue caused by memset() clearing the physical address
of the SGEs in the compound frame table.
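
For reference, a minimal sketch of the per-descriptor errata setup that
this patch now applies in both the single-copy and SG/burst paths
(field and constant names are taken from this patch; the helper itself
is illustrative only and not part of the change):

static void
sdf_errata_setup(struct fsl_qdma_sdf *sdf, uint32_t len)
{
	/* ERR050757: limit the size of PCI read transactions. */
	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
	/* ERR050265: enable prefetch to avoid a qDMA stall. */
	sdf->prefetch = 1;
#endif
	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
		/* Split the source read into 128-byte strides. */
		sdf->ssen = 1;
		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
	} else {
		sdf->ssen = 0;
		sdf->sss = 0;
		sdf->ssd = 0;
	}
}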

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 215 +++++++++++++++++------------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 +-
 2 files changed, 107 insertions(+), 115 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 1c8334291a..690ad5a6ff 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,10 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable = 1;
+static int s_sg_enable = 1;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+static int s_pci_read = 1;
+#endif
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -126,10 +129,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
 	struct fsl_qdma_cmpd_ft *ft;
-	struct fsl_qdma_df *df;
 
 	for (i = 0; i < queue->n_cq; i++) {
-		dma_addr_t phy_ft = 0, phy_df = 0;
+		dma_addr_t phy_ft = 0;
 
 		queue->ft[i] = dma_pool_alloc(NULL,
 			sizeof(struct fsl_qdma_cmpd_ft),
@@ -156,25 +158,14 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
 		queue->ft[i]->phy_dsge = phy_ft +
 			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
-
-		queue->df[i] = dma_pool_alloc(NULL,
-			sizeof(struct fsl_qdma_df),
-			RTE_CACHE_LINE_SIZE, &phy_df);
-		if (!queue->df[i]) {
-			rte_free(queue->ft[i]);
-			queue->ft[i] = NULL;
-			goto fail;
-		}
-
-		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
-		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
 
 		ft = queue->ft[i];
-		df = queue->df[i];
-		sdf = &df->sdf;
-		ddf = &df->ddf;
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
 		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
 
@@ -198,10 +189,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return 0;
 
 fail:
-	for (j = 0; j < i; j++) {
+	for (j = 0; j < i; j++)
 		rte_free(queue->ft[j]);
-		rte_free(queue->df[j]);
-	}
 
 	return -ENOMEM;
 }
@@ -247,23 +236,12 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	sprintf(nm, "Descriptor Buf_%d_%d",
-		block_id, queue_id);
-	cmd_queue->df = rte_zmalloc(nm,
-			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!cmd_queue->df) {
-		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->ft);
-		rte_free(cmd_queue->cq);
-		return -ENOMEM;
-	}
 	sprintf(nm, "Pending_desc_%d_%d",
 		block_id, queue_id);
 	cmd_queue->pending_desc = rte_zmalloc(nm,
 		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
 	if (!cmd_queue->pending_desc) {
 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->df);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
@@ -278,7 +256,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-desc_ring_%d_%d",
@@ -292,7 +269,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-pool-desc_ring_%d_%d",
@@ -307,7 +283,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 
@@ -320,7 +295,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 static void
 fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue->pending_desc);
@@ -664,8 +638,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ft = fsl_queue->ft[fsl_queue->ci];
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -745,7 +741,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
 	uint8_t *block = fsl_queue->block_vir, i;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
@@ -758,74 +754,10 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_sdf *sdf;
 #endif
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
-	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->prefetch = 1;
-#endif
-#endif
-
-	if (num == 1) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-		if (fsl_queue->pending_desc[start].len >
-			FSL_QDMA_CMD_SSS_DISTANCE) {
-			sdf->ssen = 1;
-			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-		} else {
-			sdf->sss = 0;
-			sdf->ssd = 0;
-		}
-#endif
-		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-			fsl_queue->pending_desc[start].dst,
-			fsl_queue->pending_desc[start].src,
-			fsl_queue->pending_desc[start].len);
-		if (!ret) {
-			fsl_queue->pending_start =
-				(start + 1) & (fsl_queue->pending_max - 1);
-			fsl_queue->pending_num = 0;
-		}
-		return ret;
-	} else if (s_sg_disable) {
-		while (fsl_queue->pending_num > 0) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-			if (fsl_queue->pending_desc[start].len >
-				FSL_QDMA_CMD_SSS_DISTANCE) {
-				sdf->ssen = 1;
-				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-			} else {
-				sdf->sss = 0;
-				sdf->ssd = 0;
-			}
-#endif
-			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-				fsl_queue->pending_desc[start].dst,
-				fsl_queue->pending_desc[start].src,
-				fsl_queue->pending_desc[start].len);
-			if (!ret) {
-				start = (start + 1) &
-					(fsl_queue->pending_max - 1);
-				fsl_queue->pending_start = start;
-				fsl_queue->pending_num--;
-			} else {
-				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
-					ret);
-				return -EIO;
-			}
-		}
 
-		return 0;
-	}
 	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
 	csgf_src->extion = 1;
 	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
@@ -849,13 +781,21 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->ssen = 1;
-		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-	} else {
-		sdf->sss = 0;
-		sdf->ssd = 0;
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
 	}
 #endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
@@ -875,6 +815,51 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint16_t start = fsl_queue->pending_start;
+	int ret;
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (!s_sg_enable) {
+		while (fsl_queue->pending_num > 0) {
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
+
+		return 0;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
+
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	__rte_unused uint32_t info_sz)
@@ -1276,6 +1261,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int regs_size;
 	int ret;
 	uint32_t i, j, k;
+	char *penv;
 
 	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
 		s_data_validation = 1;
@@ -1283,8 +1269,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
 		s_hw_err_check = 1;
 
-	if (getenv("DPAA_QDMA_SG_DISABLE"))
-		s_sg_disable = 1;
+	penv = getenv("DPAA_QDMA_SG_ENABLE");
+	if (penv)
+		s_sg_enable = atoi(penv);
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	penv = getenv("DPAA_QDMA_PCI_READ");
+	if (penv)
+		s_pci_read = atoi(penv);
+#endif
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index f4535af3dd..906d452d48 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -88,9 +88,7 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CFG_SSS_OFFSET		12
-#define FSL_QDMA_CMD_SSS_STRIDE		128
-#define FSL_QDMA_CMD_SSS_DISTANCE	128
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
@@ -192,8 +190,10 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t cache_align[2];
 	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
 	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
 	uint64_t phy_ssge;
 	uint64_t phy_dsge;
+	uint64_t phy_df;
 } __rte_packed;
 
 #define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
@@ -273,7 +273,6 @@ struct fsl_qdma_queue {
 	uint8_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
-	struct fsl_qdma_df **df;
 	void *engine;
 };
 
-- 
2.25.1
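
Usage note (not part of the patch): with this change the SG/burst path
and the ERR050757 PCI-read workaround are runtime-selectable through
environment variables parsed with atoi(). A minimal sketch, assuming
the variables are set before the DPAA qDMA device is probed:

#include <stdlib.h>

static void
dpaa_qdma_env_setup(void)
{
	/* Non-zero keeps the SG/burst copy path enabled (the default). */
	setenv("DPAA_QDMA_SG_ENABLE", "1", 1);
	/* Non-zero keeps the ERR050757 PCI-read workaround enabled;
	 * only honoured when built with RTE_DMA_DPAA_ERRATA_ERR050757.
	 */
	setenv("DPAA_QDMA_PCI_READ", "1", 1);
}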


