DPDK patches and discussions
From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v3 23/30] dma/dpaa: block dequeue
Date: Mon, 22 Jul 2024 22:09:23 +0530
Message-ID: <20240722163930.2171568-24-g.singh@nxp.com>
In-Reply-To: <20240722163930.2171568-1-g.singh@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

Dequeue a block's status queue to identify which command queue within the
block has completed, then move that queue's finished descriptors to its
completion pool. The dequeue and dequeue-status operations now always use
this block-level dequeue instead of draining each queue individually.
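
The flow of the new dpaa_qdma_block_dequeue() is sketched below. This is
a condensed, illustrative rendering of the code added by this patch
(ring bookkeeping and error logging omitted), not a separate
implementation; all names are taken from the diff:

	/* Drain the block's status queue until HW reports it empty. */
	while (!(qdma_readl_be(block + FSL_QDMA_BSQSR) & FSL_QDMA_BSQSR_QE_BE)) {
		/* Pop one status entry from the block's status queue. */
		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
		/* Ask the entry which command queue of this block completed. */
		if (!qdma_ccdf_get_queue(&cq[start], &qid))
			break;
		cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
		/* Move that command queue's finished descriptors into its
		 * completion pool; dpaa_qdma_dequeue() bursts them out later.
		 */
		start = (start + 1 == stat_queue->n_cq) ? 0 : start + 1;
	}
	stat_queue->complete = start;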

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 252 ++++++++++++++++-------------------
 1 file changed, 116 insertions(+), 136 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1ad60d1f2..de5ecc7d0b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -442,86 +442,6 @@ fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
 	}
 }
 
-static int
-fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
-{
-	uint32_t reg;
-	int count = 0, ret;
-	uint8_t *block = fsl_queue->block_vir;
-	uint16_t *dq_complete = NULL, drain_num = 0;
-	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
-
-	while (1) {
-		if (rte_ring_free_count(fsl_queue->complete_pool) <
-			(FSL_QDMA_SG_MAX_ENTRY * 2))
-			break;
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			break;
-
-		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		ret = rte_ring_dequeue(fsl_queue->complete_burst,
-			(void **)&dq_complete);
-		if (ret)
-			DPAA_QDMA_ERR("DQ desc number failed!\n");
-
-		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
-			(void **)desc, *dq_complete, NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
-
-		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
-			(void **)desc, (*dq_complete), NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		drain_num += *dq_complete;
-		fsl_queue->complete_start =
-			(fsl_queue->complete_start + (*dq_complete)) &
-			(fsl_queue->pending_max - 1);
-		fsl_queue->stats.completed++;
-
-		count++;
-	}
-
-	return drain_num;
-}
-
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls, uint16_t *last_idx,
-	enum rte_dma_status_code *status)
-{
-	int ret;
-	uint16_t dq_num = 0, i;
-	struct fsl_qdma_desc *desc_complete[nb_cpls];
-
-	ret = fsl_qdma_queue_drain(fsl_queue);
-	if (ret < 0) {
-		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			ret);
-	}
-
-	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
-		(void **)desc_complete, nb_cpls, NULL);
-	for (i = 0; i < dq_num; i++)
-		last_idx[i] = desc_complete[i]->flag;
-
-	if (status) {
-		for (i = 0; i < dq_num; i++)
-			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
-	}
-
-	return dq_num;
-}
-
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -682,13 +602,90 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete = NULL;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
+
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
-	int overflow = 0, drain;
-	uint32_t reg, check_num, drain_num;
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
 	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
@@ -711,11 +708,12 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	drain_num = 0;
 
 drain_again:
-	drain = fsl_qdma_queue_drain(fsl_queue);
-	if (drain <= 0) {
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
 		drain_num++;
 		if (drain_num > 100) {
-			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
 			return -ENOSPC;
@@ -724,7 +722,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	}
 	check_num++;
 	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
@@ -1059,39 +1057,6 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
-static uint16_t
-dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
-	uint8_t block_id)
-{
-	struct fsl_qdma_status_queue *stat_queue;
-	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_comp_cmd_desc *cq;
-	uint16_t start, count = 0;
-	uint8_t qid;
-	int ret;
-
-	stat_queue = &fsl_qdma->stat_queues[block_id];
-	cq = stat_queue->cq;
-	start = stat_queue->complete;
-
-	do {
-		ret = qdma_ccdf_get_queue(&cq[start], &qid);
-		if (ret == true) {
-			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
-			start++;
-			if (unlikely(start == stat_queue->n_cq))
-				start = 0;
-			count++;
-		} else {
-			break;
-		}
-	} while (1);
-	stat_queue->complete = start;
-
-	return count;
-}
-
 static int
 dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
 {
@@ -1164,22 +1129,32 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
 		return 0;
 	}
 
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, st);
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1187,7 +1162,7 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			fsl_queue->stats.errors++;
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static uint16_t
@@ -1196,9 +1171,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -1207,13 +1184,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 
 	*has_error = false;
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, NULL);
-	}
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1224,7 +1204,7 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static int
-- 
2.25.1


Thread overview: 94+ messages
2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-07-19 10:00 ` [PATCH 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-19 10:00 ` [PATCH 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-19 10:01 ` [PATCH 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-19 10:01 ` [PATCH 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-19 10:01 ` [PATCH 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-19 10:01 ` [PATCH 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-19 10:01 ` [PATCH 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-19 10:01 ` [PATCH 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-19 10:01 ` [PATCH 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-19 10:01 ` [PATCH 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-19 10:01 ` [PATCH 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-19 10:01 ` [PATCH 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-19 10:01 ` [PATCH 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-19 10:01 ` [PATCH 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-19 10:01 ` [PATCH 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-19 10:01 ` [PATCH 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-19 10:01 ` [PATCH 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 23/30] dma/dpaa: block dequeue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-19 10:01 ` [PATCH 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-19 10:01 ` [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-19 10:01 ` [PATCH 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-19 10:01 ` [PATCH 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-19 10:01 ` [PATCH 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-07-22 11:58   ` [v2 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-07-22 11:58   ` [v2 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-22 11:58   ` [v2 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-22 11:58   ` [v2 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-22 11:58   ` [v2 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-22 11:58   ` [v2 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-22 11:58   ` [v2 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-22 11:58   ` [v2 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-22 11:58   ` [v2 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-22 11:58   ` [v2 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-22 11:58   ` [v2 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-22 11:58   ` [v2 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-22 11:58   ` [v2 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-22 11:58   ` [v2 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-22 11:58   ` [v2 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-22 11:58   ` [v2 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-22 11:58   ` [v2 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-22 11:58   ` [v2 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-22 11:58   ` [v2 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-22 11:58   ` [v2 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-22 11:58   ` [v2 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-22 11:58   ` [v2 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-22 11:58   ` [v2 23/30] dma/dpaa: block dequeue Gagandeep Singh
2024-07-22 11:58   ` [v2 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-22 11:58   ` [v2 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-22 11:58   ` [v2 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-22 11:58   ` [v2 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-22 11:58   ` [v2 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-22 11:58   ` [v2 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-22 11:58   ` [v2 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-22 20:19       ` Stephen Hemminger
2024-07-22 16:39     ` [v3 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-22 16:39     ` [v3 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-22 16:39     ` [v3 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-22 20:21       ` Stephen Hemminger
2024-07-22 16:39     ` [v3 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-22 16:39     ` [v3 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-22 16:39     ` [v3 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-22 16:39     ` [v3 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-22 16:39     ` [v3 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-22 16:39     ` [v3 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-22 16:39     ` [v3 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-22 16:39     ` [v3 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-22 16:39     ` [v3 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-22 16:39     ` [v3 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-22 16:39     ` [v3 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-22 16:39     ` [v3 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-22 16:39     ` [v3 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-22 16:39     ` [v3 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-22 16:39     ` [v3 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-22 16:39     ` [v3 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-22 16:39     ` [v3 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-22 16:39     ` Gagandeep Singh [this message]
2024-07-22 16:39     ` [v3 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-22 16:39     ` [v3 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-22 16:39     ` [v3 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-22 16:39     ` [v3 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-22 16:39     ` [v3 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-22 16:39     ` [v3 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-22 16:39     ` [v3 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
