DPDK patches and discussions
* [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param
@ 2024-07-19 10:00 Gagandeep Singh
  2024-07-19 10:00 ` [PATCH 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
                   ` (29 more replies)
  0 siblings, 30 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:00 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Configure route-by-port (RBP) from the PCIe port parameters passed in
'struct rte_dma_vchan_conf' at vchan setup time, instead of through the
driver-private rte_dpaa2_qdma_vchan_rbp_enable() API:

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
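
The following is an illustrative (hypothetical) application-side sketch of
the generic DMA API usage this enables; dpaa2_qdma_vchan_rbp_set() maps
these PCIe port parameters onto the RBP fields shown above. All values are
examples only.

#include <rte_dmadev.h>

/* Configure vchan 0 for MEM_TO_DEV copies targeting PCIe controller 1,
 * PF 0 / VF 1 (illustrative values only).
 */
static int
setup_pcie_vchan(int16_t dev_id)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 1, /* -> rbp.dportid */
				.pfid = 0,   /* -> rbp.dpfid */
				.vfen = 1,   /* -> rbp.dvfa */
				.vfid = 1,   /* -> rbp.dvfid */
			},
		},
	};

	return rte_dma_vchan_setup(dev_id, 0, &conf);
}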

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 2c91ceec13..5954b552b5 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.svfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index 713ed41f0c..eb012cfbfc 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1



* [PATCH 02/30] dma/dpaa2: support multiple HW queues
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-07-19 10:00 ` Gagandeep Singh
  2024-07-19 10:00 ` [PATCH 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
                   ` (28 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:00 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Initialize and configure the DMA device's queues according to the number
of HW queues reported by the MC bus.
Because multiple HW queues per device are now supported, the virtual
queue (status ring) implementation is dropped.
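
With this change, rte_dma_info.max_vchans reports the DPDMAI queue count,
so an application can create one virtual channel per HW queue. A minimal
sketch, assuming 'dev_id' identifies an already-probed dpaa2 qdma device:

#include <rte_dmadev.h>

/* Configure one vchan per HW queue reported by the device. */
static int
configure_all_hw_queues(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf cfg = {0};
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret)
		return ret;

	/* max_vchans now equals the number of DPDMAI HW queues. */
	cfg.nb_vchans = info.max_vchans;
	return rte_dma_configure(dev_id, &cfg);
}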

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 312 +++++++++++++++------------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 140 insertions(+), 178 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5954b552b5..945ba71e4a 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -478,9 +478,9 @@ dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
 
 static inline uint16_t
 dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -512,9 +512,9 @@ dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
 
 static inline uint16_t
 dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -548,12 +548,12 @@ dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
 /* Function to receive a QDMA job for a given device and queue*/
 static int
 dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -562,7 +562,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, pull_size;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -575,15 +575,17 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_jobs;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -697,12 +699,12 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 
 static int
 dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -710,7 +712,7 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, next_pull, num_pulled = 0;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -725,15 +727,15 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	rxq = &(dpdmai_dev->rx_queue[0]);
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
 	do {
 		dq_storage = rxq->q_storage->dq_storage[0];
@@ -810,7 +812,7 @@ dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
 			uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
 	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
 	struct qbman_eq_desc eqdesc;
 	struct qbman_swp *swp;
@@ -931,8 +933,8 @@ dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -966,8 +968,8 @@ dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
 
 int
 rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_cpls)
 {
 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
 	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
@@ -978,14 +980,11 @@ rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
 }
 
 static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
+dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_jobs)
 {
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
+	int ret;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
 		/** Make sure there are enough space to get jobs.*/
@@ -1002,42 +1001,12 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
 				qdma_vq->num_dequeues), nb_jobs);
 
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
-
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
-
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
-		}
+	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
+	if (ret < 0) {
+		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
+			qdma_vq->dpdmai_dev->dpdmai_id,
+			qdma_vq->vq_id, ret);
+		return ret;
 	}
 
 	qdma_vq->num_dequeues += ret;
@@ -1046,9 +1015,9 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 
 static uint16_t
 dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
+	const uint16_t nb_cpls,
+	uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1056,7 +1025,7 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
 	int ret, i;
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 
 	for (i = 0; i < ret; i++)
 		st[i] = jobs[i]->status;
@@ -1071,8 +1040,8 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1082,7 +1051,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 
 	RTE_SET_USED(has_error);
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
 				jobs, nb_cpls);
 
 	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
@@ -1103,16 +1072,15 @@ rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 			     RTE_DMA_CAPA_MEM_TO_DEV |
@@ -1120,7 +1088,7 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
 
@@ -1129,12 +1097,13 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
 	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1142,9 +1111,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
+		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
+			dev->data->dev_name);
+		return -EBUSY;
 	}
 
 	/* Allocate Virtual Queues */
@@ -1156,6 +1125,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
 		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++)
+		qdma_dev->vqs[i].vq_id = i;
+
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 
 	return 0;
@@ -1257,13 +1229,12 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
 	int sg_enable = 0, ret;
@@ -1301,20 +1272,6 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
 	}
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
-
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
@@ -1410,8 +1367,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
+		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
+			dev->data->dev_name);
 		return -EBUSY;
 	}
 
@@ -1424,10 +1381,6 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	/* Reset and free virtual queues */
-	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
-	}
 	rte_free(qdma_dev->vqs);
 	qdma_dev->vqs = NULL;
 
@@ -1504,29 +1457,35 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	int ret;
+	struct dpaa2_queue *rxq;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
 	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+		}
 	}
 
 	/* Close the device at underlying layer*/
 	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -1538,80 +1497,87 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	struct dpdmai_rx_queue_attr rx_attr;
 	struct dpdmai_tx_queue_attr tx_attr;
 	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
 	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
+	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
 	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		return ret;
 	}
 
 	/* Get DPDMAI attributes */
 	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		goto init_err;
 	}
 	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
+	/* Set up Rx Queues */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq = &dpdmai_dev->rx_queue[i];
+		rxq->q_storage = rte_malloc("dq_storage",
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto init_err;
+		}
 
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 	}
 
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
+		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
 
 	/* Enable the device */
 	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..786dcb9308 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -18,7 +18,7 @@
 
 #define DPAA2_QDMA_MAX_SG_NB 64
 
-#define DPAA2_DPDMAI_MAX_QUEUES	1
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
 /** FLE single job pool size: job pointer(uint64_t) +
  * 3 Frame list + 2 source/destination descriptor.
@@ -245,8 +245,6 @@ typedef int (qdma_enqueue_multijob_t)(
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
@@ -255,8 +253,6 @@ struct qdma_virt_queue {
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
-- 
2.25.1



* [PATCH 03/30] dma/dpaa2: adapt DMA driver API
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-07-19 10:00 ` [PATCH 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
@ 2024-07-19 10:00 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 04/30] dma/dpaa2: multiple process support Gagandeep Singh
                   ` (27 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:00 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

1) Support DMA single copy and SG copy.
2) Support silent mode.

A job index is combined with the length field.
In silent mode, this index tells the DMA driver which inner descriptor
to use.
In non-silent mode, this index tells the user which descriptor has
completed.
In addition, because dpaa2 qdma cannot preserve ordering,
"rte_dma_completed_t" returns multiple indexes instead of only the last
index.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 1667 +++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  126 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  119 +-
 drivers/dma/dpaa2/version.map          |   13 -
 4 files changed, 799 insertions(+), 1126 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 945ba71e4a..b467845fa0 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,218 +16,345 @@
 
 #define DPAA2_QDMA_PREFETCH "prefetch"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
 static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	fd->simple_pci.len_sl = len;
+	if ((ring->tail + nb) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem, nb * sizeof(uint16_t));
+		ring->tail += nb;
+	} else {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem,
+			(DPAA2_QDMA_MAX_DESC - ring->tail) *
+			sizeof(uint16_t));
+		rte_memcpy(&ring->cntx_idx_ring[0],
+			&elem[DPAA2_QDMA_MAX_DESC - ring->tail],
+			(nb - DPAA2_QDMA_MAX_DESC + ring->tail) *
+			sizeof(uint16_t));
+		ring->tail = (ring->tail + nb) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+	if (free_space)
+		*free_space = ring->free_space;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	return nb;
+}
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
+{
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!ret)
+		return 0;
 
-	return 0;
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
 }
 
-static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
 
-	return 0;
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
+
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
 }
 
 static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
-
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 {
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
+{
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i = 0, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = len;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = len;
+
+	total_len += len;
+	sg_cntx->cntx_idx[i] = idx;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
 {
-	uint16_t i;
-	uint32_t total_len = 0;
-	uint64_t iova;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
+}
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +362,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +372,10 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,327 +386,432 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
 	}
 
-	return 0;
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
+	}
+
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint32_t cntx_idx, len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+
+	if (unlikely(nb_src != nb_dst))
+		return -ENOTSUP;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
+		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint16_t cntx_idx;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[cntx_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = cntx_idx;
+	}
 
-	*job = *ppjob;
-	(*job)->status = status;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	return (*job)->vq_id;
-}
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+		}
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
+		fle_post_populate(fle, src, dst, len);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, len,
+			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	return job[0]->vq_id;
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
+
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
 	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
-		dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -594,21 +820,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy\n");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -617,7 +842,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -631,7 +856,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -664,27 +889,40 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
+		fle_sdd = (void *)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
+		} else {
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
+		}
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
 		dq_storage++;
-		num_rx += num_rx_ret;
 	} while (pending);
 
 	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
 			;
 		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 	}
 	/* issue a volatile dequeue command for next pull */
 	while (1) {
 		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)\n");
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
 			continue;
 		}
 		break;
@@ -694,387 +932,18 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq;
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
-		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
+	if (has_error)
+		*has_error = false;
 
 	return num_rx;
 }
 
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_jobs)
-{
-	int ret;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-	if (ret < 0) {
-		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
-			qdma_vq->dpdmai_dev->dpdmai_id,
-			qdma_vq->vq_id, ret);
-		return ret;
-	}
-
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-	const uint16_t nb_cpls,
-	uint16_t *last_idx,
-	enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-	uint16_t vchan, const uint16_t nb_cpls,
-	uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-}
-
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct rte_dma_info *dev_info,
@@ -1119,80 +988,22 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	/* Allocate Virtual Queues */
 	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
 	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
 	for (i = 0; i < dev_conf->nb_vchans; i++)
 		qdma_dev->vqs[i].vq_id = i;
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
 }
 
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
-
-	if (!devargs)
-		return 0;
-
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
-
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-	rte_kvargs_free(kvlist);
-
-	return 1;
-}
-
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
-
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
-}
-
 static int
 dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 	const struct rte_dma_vchan_conf *conf)
@@ -1236,8 +1047,8 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
+	char *env = NULL;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1247,85 +1058,70 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
+	/** FLE pre-populate is enabled by default. */
+	env = getenv("DPAA2_QDMA_FLE_PRE_POPULATE");
+	if (env)
+		qdma_dev->vqs[vchan].fle_pre_populate = atoi(env);
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
 
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	env = getenv("DPAA2_QDMA_DESC_DEBUG");
+	if (env && atoi(env))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1374,9 +1170,12 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
@@ -1618,7 +1417,7 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1628,8 +1427,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1639,10 +1438,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 786dcb9308..ee34532408 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,7 +5,7 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +13,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
-
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,6 +163,39 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -221,27 +215,18 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
@@ -249,10 +234,11 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -262,18 +248,17 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/** Used when silent mode is enabled */
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -284,6 +269,7 @@ struct qdma_device {
 	uint16_t num_vqs;
 	/** Device state - started or stopped */
 	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..729bff42bb 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,118 +7,19 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)(((idx) << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | ((len) & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
 
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index eb012cfbfc..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_24 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1



* [PATCH 04/30] dma/dpaa2: multiple process support
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-07-19 10:00 ` [PATCH 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
  2024-07-19 10:00 ` [PATCH 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
                   ` (26 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Support multiple processes for dpaa2 dma.
1) Move the queue configuration procedure from the init function to
the device configuration function, which is called by the user.

2) Instances of dpaa2_dpdmai_dev and qdma_device are allocated by the
primary process and shared among all processes.

3) The MC register is mapped per process.

4) The user is responsible for checking the number of configured vqs
before using the dma device, to identify whether the device is already
occupied by another process (a usage sketch follows the diffstat
below).

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 405 ++++++++++++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 254 insertions(+), 157 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index b467845fa0..51affed40c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #include <rte_eal.h>
@@ -19,6 +19,8 @@
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
 
+static struct fsl_mc_io s_proc_mc_reg;
+
 static inline int
 qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
 	const uint16_t *elem, uint16_t nb,
@@ -960,6 +962,9 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
@@ -969,25 +974,102 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	const struct rte_dma_conf *dev_conf,
 	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
-			dev->data->dev_name);
-		return -EBUSY;
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
+
+		return -ENOTSUP;
+	}
+
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
+
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
+
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
 
 	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
+	qdma_dev->vqs = rte_zmalloc(NULL,
 		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
 		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
@@ -995,13 +1077,50 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
-	for (i = 0; i < dev_conf->nb_vchans; i++)
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
+
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
+
+	return ret;
 }
 
 static int
@@ -1130,11 +1249,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1143,30 +1268,33 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
-			dev->data->dev_name);
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
@@ -1180,8 +1308,31 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
+	/* Free RXQ storages */
+	for (i = 0; i < qdma_dev->num_vqs; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+	}
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1190,18 +1341,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1256,56 +1397,97 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.",
 			dpdmai_dev->dpdmai_id);
-	}
-
-	/* Set up the DQRR storage for Rx */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		return 0;
 	}
 
 	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
-			dpdmai_dev->dpdmai_id);
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
+
+		return ret;
+	}
+
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
 	}
 
 	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
+	if (!dpaa2_coherent_no_alloc_cache) {
+		if (dpaa2_svr_family == SVR_LX2160A) {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+		} else {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_COHERENT_ALLOCATE_CACHE;
+		}
+	}
+
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
 		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
@@ -1314,105 +1496,24 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	}
 
 	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->token, &attr);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
 			__func__, dpdmai_dev->dpdmai_id, ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queues */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				CMD_PRI_LOW,
-				dpdmai_dev->token,
-				i, 0, &rx_queue_cfg);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-
-		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq = &dpdmai_dev->rx_queue[i];
-		rxq->q_storage = rte_malloc("dq_storage",
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto init_err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-	}
-
-	/* Get Rx and Tx queues FQID's */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &rx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
-
-		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &tx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
-	}
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
-	if (!dpaa2_coherent_no_alloc_cache) {
-		if (dpaa2_svr_family == SVR_LX2160A) {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
-		} else {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_COHERENT_ALLOCATE_CACHE;
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
 		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
-
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
-	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
-	}
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index ee34532408..743a43fa14 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -200,8 +200,6 @@ struct qdma_cntx_long {
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -267,8 +265,6 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
 	uint8_t is_silent;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 05/30] dma/dpaa2: add sanity check for SG entry
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (2 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 04/30] dma/dpaa2: multiple process support Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
                   ` (25 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Make sure the SG entry number doesn't overflow.
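
A caller-side sketch of the limit being enforced, assuming the generic
rte_dma_copy_sg() API and the existing RTE_DPAA2_QDMA_JOB_SUBMIT_MAX
definition; dev_id, vchan, ret and the SG arrays are illustrative:

/* Both SG lists must be the same length and must not exceed
 * RTE_DPAA2_QDMA_JOB_SUBMIT_MAX entries, otherwise the driver now
 * rejects the job with -ENOTSUP / -EINVAL.
 */
if (nb_src == nb_dst && nb_src <= RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
	ret = rte_dma_copy_sg(dev_id, vchan, src_sges, dst_sges,
			nb_src, nb_dst, RTE_DMA_OP_FLAG_SUBMIT);
} else {
	/* split the transfer into smaller SG jobs */
}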

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 51affed40c..e521df8817 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -615,8 +615,17 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
-	if (unlikely(nb_src != nb_dst))
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
 		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 06/30] dma/dpaa2: include DPAA2 specific header files
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (3 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
                   ` (24 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include the dpaa2_hw_pvt.h and dpaa2_hw_dpio.h header files.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 743a43fa14..eb02bff08f 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,6 +5,9 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
 #define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (4 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                   ` (23 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For copy_sg: pass the list of job indexes via the flags.
For copy: pass the job index via the flags.
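
A minimal usage sketch, assuming the RTE_DPAA2_QDMA_COPY_SUBMIT and
RTE_DPAA2_QDMA_SG_SUBMIT helpers added below together with the generic
rte_dma_copy()/rte_dma_copy_sg() API; dev_id, vchan, the buffers, nb_sge
and job_idx are illustrative:

/* Single copy: encode the caller's job index into the flags word. */
uint64_t flags = RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);
int ret = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len, flags);

/* SG copy: pass the address of a job-index array through the flags.
 * The array is aligned to RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN so that the
 * low bits stay free for the operation flags.
 */
uint16_t *idx_list = rte_malloc(NULL,
	sizeof(uint16_t) * RTE_DPAA2_QDMA_JOB_SUBMIT_MAX,
	RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN);
flags = RTE_DPAA2_QDMA_SG_SUBMIT(idx_list, RTE_DMA_OP_FLAG_SUBMIT);
ret = rte_dma_copy_sg(dev_id, vchan, src_sges, dst_sges,
		nb_sge, nb_sge, flags);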

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 92 ++++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  7 ++
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h | 15 ++++-
 3 files changed, 68 insertions(+), 46 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index e521df8817..b38a0f1bac 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -280,25 +280,22 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i = 0, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < (nb_sge - 1); i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
 
 		src_sge->ctrl.f = 0;
 		dst_sge->ctrl.f = 0;
@@ -309,19 +306,15 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	if (unlikely(src[i].length != dst[i].length))
 		return -ENOTSUP;
 
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
-
 	src_sge->addr_lo = (uint32_t)src[i].addr;
 	src_sge->addr_hi = (src[i].addr >> 32);
-	src_sge->data_len.data_len_sl0 = len;
+	src_sge->data_len.data_len_sl0 = src[i].length;
 
 	dst_sge->addr_lo = (uint32_t)dst[i].addr;
 	dst_sge->addr_hi = (dst[i].addr >> 32);
-	dst_sge->data_len.data_len_sl0 = len;
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
 
-	total_len += len;
-	sg_cntx->cntx_idx[i] = idx;
+	total_len += dst[i].length;
 	sg_cntx->job_nb = nb_sge;
 
 	src_sge->ctrl.f = QDMA_SG_F;
@@ -343,20 +336,18 @@ sg_entry_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < nb_sge; i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -366,7 +357,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -374,8 +365,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
@@ -606,14 +596,15 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	int ret = 0, expected;
-	uint32_t cntx_idx, len;
+	int ret = 0, expected, i;
+	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_sg *cntx_sg = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
 
 	if (unlikely(nb_src != nb_dst)) {
 		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
@@ -630,14 +621,16 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	memset(fd, 0, sizeof(struct qbman_fd));
 
 	if (qdma_dev->is_silent) {
-		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
-		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->slient_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_sg);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -656,8 +649,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
 	if (qdma_vq->fle_pre_populate) {
-		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
 			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
 		len = sg_entry_post_populate(src, dst,
 			cntx_sg, nb_src);
@@ -683,6 +681,8 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->slient_idx =
+		(qdma_vq->slient_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -705,28 +705,23 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
-	uint16_t cntx_idx;
-	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long;
+	struct qdma_cntx_long *cntx_long = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
-
 	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[cntx_idx];
+		cntx_long = qdma_vq->cntx_long[qdma_vq->slient_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_long);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = cntx_idx;
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -749,16 +744,20 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 			fle_sdd_pre_populate(&cntx_long->fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
 		}
 
-		fle_post_populate(fle, src, dst, len);
+		fle_post_populate(fle, src, dst, length);
 	} else {
 		sdd = cntx_long->fle_sdd.sdd;
 		sdd_iova = cntx_iova +
 			offsetof(struct qdma_cntx_long, fle_sdd) +
 			offsetof(struct qdma_cntx_fle_sdd, sdd);
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
-			src, dst, len,
+			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
@@ -766,6 +765,8 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->slient_idx =
+		(qdma_vq->slient_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -963,14 +964,17 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index eb02bff08f..c93e0098a5 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -199,6 +199,12 @@ struct qdma_cntx_long {
 	uint16_t rsv[3];
 } __rte_packed;
 
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -256,6 +262,7 @@ struct qdma_virt_queue {
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
 	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t slient_idx;
 
 	int num_valid_jobs;
 
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 729bff42bb..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -20,6 +20,17 @@
 #define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
 	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 08/30] bus/fslmc: enhance the qbman dq storage logic
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (5 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 09/30] dma/dpaa2: add short FD support Gagandeep Singh
                   ` (22 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used across multiple cores; the single DQ
storage kept in the first union member is leaked when multiple storages
are allocated. It does not make sense to keep that single DQ storage in
the union, so remove it and reuse the first entry of the multiple-storage
array for the single-storage case.
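
A sketch of the pattern after this change, using the
dpaa2_queue_storage_alloc()/dpaa2_queue_storage_free() helpers added
below; the rxq pointer and error handling are illustrative:

/* Queue setup: one DQ storage slot per possible lcore instead of the
 * single storage kept in the union.
 */
int ret = dpaa2_queue_storage_alloc(rxq, RTE_MAX_LCORE);
if (ret)
	dpaa2_queue_storage_free(rxq, RTE_MAX_LCORE);

/* Datapath: each core pulls through its own storage slot. */
struct qbman_result *dq_storage =
	rxq->q_storage[rte_lcore_id()]->dq_storage[0];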

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 43 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 103 insertions(+), 141 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 07256ed7ec..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed\n");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..1ce481c88d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d55..7df208d004 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1891,7 +1891,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -1982,10 +1982,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2036,18 +2033,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4754b9d6f8..c51e68f748 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index b38a0f1bac..54617b7e16 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -824,7 +824,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1032,13 +1032,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1094,24 +1088,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1122,11 +1101,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1324,11 +1299,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..ac524d2964 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 23f7c4132d..a0c057d183 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index aeee4ac289..5f4d0c68a4 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 09/30] dma/dpaa2: add short FD support
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (6 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                   ` (21 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Short FD can be used for the single-transfer scenario and shows higher
performance than the FLE format.
1) Save the index context in the FD att field for both short and FLE
   (non-SG) formats.
2) Identify the FD type from the att field of the FD.
3) Force 48-bit addresses for the source address and the FLE, as
   required by the spec.
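
A sketch of the att-field encoding used to tell the FD types apart,
based on the dpaa2_qdma_fd_save_att()/dpaa2_qdma_fd_get_att() helpers
added below; fd and job_idx are illustrative:

/* Enqueue side: pack the job index (low 13 bits) and the FD type
 * (upper bits) into the reserved att field of the FD.
 */
dpaa2_qdma_fd_save_att(fd, job_idx, DPAA2_QDMA_FD_SHORT);

/* Dequeue side: recover both values from the completed FD. */
uint16_t att = dpaa2_qdma_fd_get_att(fd);
enum dpaa2_qdma_fd_type type = DPAA2_QDMA_FD_ATT_TYPE(att); /* DPAA2_QDMA_FD_SHORT */
uint16_t idx = DPAA2_QDMA_FD_ATT_CNTX(att);                 /* job_idx */

The per-vchan default can also be overridden at run time through the
DPAA2_QDMA_USING_SHORT_FD environment variable read in
dpaa2_qdma_vchan_setup().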

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 3 files changed, 285 insertions(+), 111 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 54617b7e16..eb2d96a35e 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -522,7 +522,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -545,11 +544,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -560,11 +556,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -582,6 +573,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -644,7 +637,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -680,6 +673,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->slient_idx =
 		(qdma_vq->slient_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -696,74 +690,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->slient_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->slient_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
-
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->slient_idx =
 		(qdma_vq->slient_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -771,15 +869,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -799,10 +971,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -901,25 +1069,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -944,8 +1095,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1178,11 +1331,18 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	/** Enable short FD by default for the non-SG format.
+	 * Short FD has higher performance than FLE.
+	 */
+	env = getenv("DPAA2_QDMA_USING_SHORT_FD");
+	if (env)
+		qdma_dev->vqs[vchan].using_short_fd = atoi(env);
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1202,7 +1362,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index c93e0098a5..252d2b1c74 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t slient_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 10/30] dma/dpaa2: limit the max descriptor number
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (7 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 09/30] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                   ` (20 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD with a width of
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits.

The max descriptor number of the ring must be a power of 2, so the
eventual maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
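
As an illustration only (a standalone sketch, not part of the patch),
the limits implied by the 13-bit index work out as follows:

	/* Illustration only: prints the limits implied by the 13-bit
	 * attribute/index field described above.
	 */
	#include <stdio.h>

	#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13

	int main(void)
	{
		unsigned int max_idx = (1u << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1;
		unsigned int max_desc = (max_idx + 1) / 2;

		/* prints: max index: 8191, max descriptors: 4096 */
		printf("max index: %u, max descriptors: %u\n", max_idx, max_desc);
		return 0;
	}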

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 252d2b1c74..1a4b611c08 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 11/30] dma/dpaa2: change the DMA copy return value
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (8 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                   ` (19 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

On success, the return value of DMA copy/SG copy should be the
index of the copied descriptor.
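
A minimal usage sketch through the generic dmadev API (application-side
code, not part of this patch; dev_id, vchan and the IOVAs are
placeholders):

	#include <errno.h>
	#include <stdbool.h>
	#include <rte_dmadev.h>

	/* Enqueue one copy and wait for its completion; the value returned
	 * by rte_dma_copy() is the descriptor index of the job.
	 */
	static int
	copy_and_wait(int16_t dev_id, uint16_t vchan,
		      rte_iova_t src, rte_iova_t dst, uint32_t len)
	{
		uint16_t last_idx = 0;
		bool has_error = false;
		int idx = rte_dma_copy(dev_id, vchan, src, dst, len,
				       RTE_DMA_OP_FLAG_SUBMIT);

		if (idx < 0)
			return idx; /* enqueue failed */

		/* Poll until at least one job (the one at 'idx') completes. */
		while (rte_dma_completed(dev_id, vchan, 1, &last_idx,
					 &has_error) == 0)
			;

		return has_error ? -EIO : 0;
	}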

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index eb2d96a35e..c3e6f84a48 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -605,6 +605,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -681,10 +686,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 12/30] dma/dpaa2: move the qdma header to common place
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (9 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 13/30] dma/dpaa: support multi channels Gagandeep Singh
                   ` (18 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change code accordingly.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9283154f8..ab42440733 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index c3e6f84a48..32736726f6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -212,16 +212,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -235,23 +235,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -350,21 +348,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -444,17 +440,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -560,7 +555,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -610,9 +605,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -631,11 +626,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -667,8 +658,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1011,7 +1001,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1046,7 +1036,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1078,7 +1068,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1131,11 +1121,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1317,6 +1307,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char pool_name[64];
 	int ret;
 	char *env = NULL;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1359,6 +1350,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 1a4b611c08..17abdf9260 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 13/30] dma/dpaa: support multi channels
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (10 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
                   ` (17 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena

This patch adds support for using multiple DMA channels in the driver.
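
A hedged application-side sketch (not driver code; dev_id and the
descriptor count are placeholders) of configuring several virtual
channels through the generic dmadev API:

	#include <rte_dmadev.h>

	static int
	setup_vchans(int16_t dev_id, uint16_t nb_vchans)
	{
		struct rte_dma_conf dev_conf = { .nb_vchans = nb_vchans };
		struct rte_dma_vchan_conf qconf = {
			.direction = RTE_DMA_DIR_MEM_TO_MEM,
			.nb_desc = 64, /* placeholder descriptor count */
		};
		uint16_t vchan;
		int ret;

		ret = rte_dma_configure(dev_id, &dev_conf);
		if (ret < 0)
			return ret;

		for (vchan = 0; vchan < nb_vchans; vchan++) {
			ret = rte_dma_vchan_setup(dev_id, vchan, &qconf);
			if (ret < 0)
				return ret;
		}

		return rte_dma_start(dev_id);
	}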

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 10e65ef1d7..24ad7ad019 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -648,8 +648,8 @@ fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
 	}
 
 finally:
-	return fsl_qdma->desc_allocated++;
-
+	fsl_qdma->desc_allocated++;
+	return 0;
 exit:
 	return -ENOMEM;
 }
@@ -670,7 +670,7 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
+	dev_info->max_vchans = 4;
 	dev_info->max_desc = DPAADMA_MAX_DESC;
 	dev_info->min_desc = DPAADMA_MIN_DESC;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 14/30] dma/dpaa: fix job enqueue
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (11 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 13/30] dma/dpaa: support multi channels Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
                   ` (16 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: stable

The flags check should use a bitwise AND instead of an equality
comparison.
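
Illustration only (names from the generic dmadev API): the flags
argument is a bit mask, so an equality test fails as soon as another
flag is combined with SUBMIT:

	#include <stdint.h>
	#include <rte_dmadev.h>

	static int
	should_trigger_doorbell(uint64_t flags)
	{
		/* Wrong: false for RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT */
		/* return flags == RTE_DMA_OP_FLAG_SUBMIT; */

		/* Right: true whenever the submit bit is set */
		return !!(flags & RTE_DMA_OP_FLAG_SUBMIT);
	}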

Fixes: 7da29a644c51 ("dma/dpaa: support DMA operations")
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 24ad7ad019..0a91cf040a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -615,7 +615,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 
 	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
 		reg |= FSL_QDMA_BCQMR_EI_BE;
 		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 15/30] dma/dpaa: add burst capacity API
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (12 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                   ` (15 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Vanshika Shukla

From: Vanshika Shukla <vanshika.shukla@nxp.com>

This patch improves the dpaa qdma driver by adding the
dpaa_qdma_burst_capacity API, which returns the remaining space
in the descriptor ring.
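
A hedged application-side sketch (dev_id, vchan and the buffers are
placeholders) of using the reported capacity via the generic
rte_dma_burst_capacity() call to throttle enqueues:

	#include <rte_common.h>
	#include <rte_dmadev.h>

	static uint16_t
	enqueue_up_to_capacity(int16_t dev_id, uint16_t vchan,
			       const rte_iova_t *src, const rte_iova_t *dst,
			       uint32_t len, uint16_t nb_jobs)
	{
		uint16_t room = rte_dma_burst_capacity(dev_id, vchan);
		uint16_t i, to_send = RTE_MIN(room, nb_jobs);

		for (i = 0; i < to_send; i++) {
			if (rte_dma_copy(dev_id, vchan, src[i], dst[i],
					 len, 0) < 0)
				break;
		}
		if (i > 0)
			rte_dma_submit(dev_id, vchan);

		return i;
	}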

Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 34 +++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h |  3 +--
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 0a91cf040a..bb6b54e583 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -423,7 +423,6 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
 				 enum rte_dma_status_code *status)
 {
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
@@ -457,7 +456,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
 			fsl_status->virt_head = fsl_status->cq;
 		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -607,7 +605,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
 	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
 	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
 	fsl_queue->virt_head++;
 
 	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
@@ -623,7 +620,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	} else {
 		fsl_queue->pending++;
 	}
-	return fsl_comp->index;
+	return 0;
 }
 
 static int
@@ -771,8 +768,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
 	struct fsl_qdma_chan *fsl_chan =
 		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	int ret;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	int ret, idx;
 
+	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
 	void *fsl_comp = NULL;
 
 	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
@@ -783,8 +782,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		return -1;
 	}
 	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	return idx;
 }
 
 static uint16_t
@@ -826,8 +827,10 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
+						st);
 	fsl_queue->stats.completed += intr;
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
 	return intr;
 }
@@ -873,9 +876,10 @@ dpaa_qdma_dequeue(void *dev_private,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
+						NULL);
 	fsl_queue->stats.completed += intr;
-
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 	return intr;
 }
 
@@ -912,6 +916,17 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+	return fsl_queue->n_cq - fsl_queue->pending;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1035,6 +1050,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..2092fb39f5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -176,7 +176,6 @@ struct fsl_qdma_comp {
 	dma_addr_t		bus_addr;
 	dma_addr_t		desc_bus_addr;
 	void			*virt_addr;
-	int			index;
 	void			*desc_virt_addr;
 	struct fsl_qdma_chan	*qchan;
 	dma_call_back		call_back_func;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 16/30] dma/dpaa: add workaround for ERR050757
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (13 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                   ` (14 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes. The completion buffer inside the
controller needs to be at least 4 KB, but the PCIe controller has
only 3 KB of buffer. If the size of pending outbound read
transactions exceeds 3 KB, the PCIe controller may drop the incoming
completions without notifying the initiator of the transaction,
leaving transactions unfinished. All subsequent outbound reads to
PCIe are then blocked permanently.
To avoid a qDMA hang while it waits for data that was silently
dropped, set stride mode for qDMA.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 18 ++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h |  5 +++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index bb6b54e583..a21279293c 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -159,6 +159,10 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* Note: command table (fsl_comp->virt_addr) is getting filled
 	 * directly in cmd descriptors of queues while enqueuing the descriptor
@@ -171,6 +175,20 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
 	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+
 	/* Status notification is enqueued to status queue. */
 	qdma_desc_addr_set64(csgf_src, src);
 	qdma_csgf_set_len(csgf_src, len);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 2092fb39f5..361f88856b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -81,6 +81,11 @@
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
+#define FSL_QDMA_CMD_SSEN		BIT(19)
+#define FSL_QDMA_CFG_SSS_OFFSET		12
+#define FSL_QDMA_CMD_SSS_STRIDE		128
+#define FSL_QDMA_CMD_SSS_DISTANCE	128
+
 #define QDMA_CCDF_STATUS		20
 #define QDMA_CCDF_OFFSET		20
 #define QDMA_CCDF_MASK			GENMASK(28, 20)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 17/30] dma/dpaa: qdma stall workaround for ERR050265
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (14 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
                   ` (13 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. The prefetchable read
setting offers better performance for misaligned transfers, in the
form of fewer transactions, and should be set whenever possible.
This patch also fixes a QDMA stall issue caused by unaligned
transactions.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 6 ++++++
 drivers/dma/dpaa/dpaa_qdma.h | 1 +
 4 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a21279293c..f1878879af 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -179,6 +179,9 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
 		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
 		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
@@ -247,6 +250,9 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 361f88856b..8cb4042bd0 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -80,6 +80,7 @@
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_PF			BIT(17)
 
 #define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 18/30] dma/dpaa: remove unwanted desc
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (15 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 19/30] dma/dpaa: data path optimization Gagandeep Singh
                   ` (12 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove the unwanted descriptor list maintenance
and the per-channel overhead.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 594 +++++++++++++----------------------
 drivers/dma/dpaa/dpaa_qdma.h |  43 +--
 2 files changed, 221 insertions(+), 416 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1878879af..8e8426b88d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -111,96 +111,6 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
-#endif
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-				FSL_QDMA_CMD_RWTTYPE_OFFSET);
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
-#endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
-#endif
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
@@ -209,42 +119,41 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 					struct fsl_qdma_queue *queue,
 					int size, int aligned)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_format *ccdf;
+	int i, j;
+	struct fsl_qdma_format *head;
+
+	head = queue->virt_head;
+
+	for (i = 0; i < (int)(queue->n_cq); i++) {
+		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+
+		queue->virt_addr[i] =
+		dma_pool_alloc(size, aligned, &bus_addr);
+		if (!queue->virt_addr[i])
 			goto fail;
-		}
 
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		queue->desc_virt_addr[i] =
+		dma_pool_alloc(size, aligned, &desc_bus_addr);
+		if (!queue->desc_virt_addr[i]) {
+			rte_free(queue->virt_addr[i]);
 			goto fail;
 		}
 
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
+		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+		memset(queue->desc_virt_addr[i], 0,
 		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
 
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
+			    QDMA_DESC_OFF;
+		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
+		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+
 		/* It must be 32 as Compound S/G Descriptor */
 		qdma_csgf_set_len(csgf_desc, 32);
 		/* Descriptor Buffer */
@@ -258,106 +167,84 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		ccdf = (struct fsl_qdma_format *)queue->virt_head;
+		qdma_desc_addr_set64(ccdf, bus_addr + 16);
+		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
+		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
+		queue->virt_head++;
 	}
+	queue->virt_head = head;
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
+	for (j = 0; j < i; j++) {
+		rte_free(queue->virt_addr[j]);
+		rte_free(queue->desc_virt_addr[j]);
 	}
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static struct fsl_qdma_queue
+*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *queue_temp;
+
+	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	if (!queue_temp) {
+		printf("no memory to allocate queues\n");
+		return NULL;
 	}
 
-	return NULL;
-}
+	queue_temp->cq =
+	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE,
+		       sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
+	if (!queue_temp->cq) {
+		rte_free(queue_temp);
 		return NULL;
-
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
-
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.\n");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
 	}
-	return queue_head;
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
+	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
+	       sizeof(struct fsl_qdma_format));
+
+	queue_temp->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->n_cq = QDMA_QUEUE_SIZE;
+	queue_temp->id = k;
+	queue_temp->pending = 0;
+	queue_temp->virt_head = queue_temp->cq;
+	queue_temp->virt_addr = rte_malloc("queue virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->virt_addr) {
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
 	}
-	rte_free(queue_head);
+	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->desc_virt_addr) {
+		rte_free(queue_temp->virt_addr);
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
+	}
+	queue_temp->stats = (struct rte_dma_stats){0};
+
+	return queue_temp;
+}
 
-	return NULL;
+static void
+fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->desc_virt_addr);
+	rte_free(queue->virt_addr);
+	rte_free(queue->cq);
+	rte_free(queue);
 }
 
 static struct
@@ -367,11 +254,6 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	unsigned int status_size;
 
 	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.\n");
-		return NULL;
-	}
 
 	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
 	if (!status_head)
@@ -399,6 +281,13 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	return status_head;
 }
 
+static void
+fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
+{
+	rte_free(status->cq);
+	rte_free(status);
+}
+
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -449,12 +338,9 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
 	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
+	u32 reg;
 	int count = 0;
 
 	while (count < nb_cpls) {
@@ -464,14 +350,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 
 		status_addr = fsl_status->virt_head;
 
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
 		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
 		reg |= FSL_QDMA_BSQMR_DI_BE;
 
@@ -483,7 +361,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
 		count++;
 
 	}
@@ -493,7 +370,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
@@ -508,11 +384,13 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
+	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		k = 0;
+		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
+			temp = fsl_qdma->queue[i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -522,19 +400,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(i));
+				    block + FSL_QDMA_BCQDPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(i));
+				    block + FSL_QDMA_BCQEDPA_SADDR(k));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEEPA_SADDR(k));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
 			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			k++;
 		}
 
 		/*
@@ -585,36 +464,19 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
-{
-	struct fsl_qdma_comp *fsl_comp;
-
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
-
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
-
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
-}
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
+				  uint64_t flags, dma_addr_t dst,
+				  dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
+	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_format *csgf_src, *csgf_dest;
 	u32 reg;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
@@ -624,17 +486,40 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
 		return -1;
 
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
+	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		   QDMA_SGF_SRC_OFF;
+	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		    QDMA_SGF_DST_OFF;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
 	fsl_queue->virt_head++;
+	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
 		fsl_queue->virt_head = fsl_queue->cq;
+		fsl_queue->ci = 0;
+	}
 
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
@@ -647,34 +532,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	return 0;
 }
 
-static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	int ret;
-
-	if (fsl_queue->count++)
-		goto finally;
-
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
-
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor\n");
-		goto exit;
-	}
-
-finally:
-	fsl_qdma->desc_allocated++;
-	return 0;
-exit:
-	return -ENOMEM;
-}
-
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	      uint32_t info_sz)
@@ -701,35 +558,26 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
 {
-	u32 i, start, end;
+	u32 i;
 	int ret;
+	struct fsl_qdma_queue *fsl_queue;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
-
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
-
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
-		}
+	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
+		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
+		return -1;
 	}
 
-	return -1;
-}
+	i = fsl_qdma->free_block_id * QDMA_QUEUES;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	fsl_queue = fsl_qdma->queue[i];
+	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
+			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	if (ret)
+		return ret;
+
+	fsl_qdma->vchan_map[vchan] = i;
+	fsl_qdma->free_block_id++;
+	return 0;
 }
 
 static int
@@ -767,10 +615,9 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *block = fsl_queue->queue_base;
 	u32 reg;
 
 	while (fsl_queue->pending) {
@@ -790,22 +637,13 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		  uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	int ret, idx;
 
 	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
-	void *fsl_comp = NULL;
 
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
-		return -1;
-	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
 	if (ret < 0)
 		return ret;
 
@@ -822,9 +660,8 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -870,9 +707,8 @@ dpaa_qdma_dequeue(void *dev_private,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -912,9 +748,8 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
 		    struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -931,9 +766,8 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	fsl_queue->stats = (struct rte_dma_stats){0};
 
@@ -944,9 +778,8 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -965,43 +798,21 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	u32 i, k = 0;
+	int j;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return -1;
 	}
 
 	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
@@ -1014,39 +825,55 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (fsl_qdma->ctrl_base == MAP_FAILED) {
 		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
 		       "size %d\n", phys_addr, regs_size);
-		goto err;
+		return -1;
 	}
 
 	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
 	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
 
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
+	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
+	if (!fsl_qdma->status)
+		goto err;
+
+	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
 	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
+		rte_free(fsl_qdma->status);
 		goto err;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		if (!fsl_qdma->status[i])
+			goto mem_free;
+		j = 0;
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
+			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k])
+				goto mem_free;
+			j++;
+		}
 
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		rte_free(fsl_qdma->status);
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	}
 	rte_free(fsl_qdma->status);
+err:
+	rte_free(fsl_qdma->queue);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return -1;
 }
@@ -1092,17 +919,16 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, k;
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
 	}
 
+	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8cb4042bd0..80366ce890 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,6 +107,9 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_SGF_SRC_OFF		2
+#define QDMA_SGF_DST_OFF		3
+#define QDMA_DESC_OFF			1
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -157,55 +160,31 @@ struct fsl_qdma_ddf {
 	__le32 cmd;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
-};
-
 struct fsl_qdma_queue {
 	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
+	void                    **virt_addr;
+	u8			ci;
+	u8			n_cq;
+	u8			id;
+	void			*queue_base;
 	struct fsl_qdma_format	*cq;
-	void			*block_base;
 	struct rte_dma_stats	stats;
-};
-
-struct fsl_qdma_comp {
+	u8			pending;
 	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+	void			**desc_virt_addr;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
 	void			*ctrl_base;
 	void			*status_base;
 	void			*block_base;
-	u32			n_chans;
 	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	**queue;
 	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
 	u32			num_blocks;
 	u8			free_block_id;
 	u32			vchan_map[4];
 	int			block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 19/30] dma/dpaa: data path optimization
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (16 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 20/30] dma/dpaa: refactor driver Gagandeep Singh
                   ` (11 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove the unnecessary status read before every send.
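
The hot path now writes a precomputed FSL_QDMA_BCQMR_EI value (0x20c0)
instead of read-modify-writing the mode register. The sketch below is
only an illustration of where that constant comes from, assuming
QDMA_QUEUE_SIZE is 64 and a little-endian host; field values are taken
from the existing BCQMR macros and it is not part of the change itself:

#include <stdint.h>

/* Illustrative only: derive the big-endian-view BCQMR value
 * (FSL_QDMA_BCQMR_EI == 0x20c0) for QDMA_QUEUE_SIZE == 64 on a
 * little-endian host.
 */
static inline uint32_t bcqmr_ei_value(void)
{
	uint32_t reg = 0x80000000u	/* FSL_QDMA_BCQMR_EN */
		     | (2u << 20)	/* FSL_QDMA_BCQMR_CD_THLD(ilog2(64) - 4) */
		     | (0u << 16);	/* FSL_QDMA_BCQMR_CQ_SIZE(ilog2(64) - 6) */
	/* Value as seen through qdma_readl_be() on a LE host: 0x2080 */
	uint32_t reg_be = __builtin_bswap32(reg);

	return reg_be | 0x40u;		/* FSL_QDMA_BCQMR_EI_BE -> 0x20c0 */
}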

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 186 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 ++
 2 files changed, 101 insertions(+), 92 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8e8426b88d..4022ad6469 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -248,7 +248,8 @@ fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 }
 
 static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+					   u32 id)
 {
 	struct fsl_qdma_queue *status_head;
 	unsigned int status_size;
@@ -277,6 +278,8 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	       sizeof(struct fsl_qdma_format));
 	status_head->n_cq = status_size;
 	status_head->virt_head = status_head->cq;
+	status_head->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	return status_head;
 }
@@ -334,12 +337,9 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
+fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_format *status_addr;
 	u32 reg;
 	int count = 0;
 
@@ -348,16 +348,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
 			return count;
 
-		status_addr = fsl_status->virt_head;
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -472,19 +463,37 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 {
 	void *block = fsl_queue->queue_base;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
-	u32 reg;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 	u32 cfg = 0;
 #endif
 
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	u32 reg;
+
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
+	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
+		DPAA_QDMA_ERR("QDMA Engine is busy\n");
 		return -1;
+	}
+#else
+	/* check whether critical watermark level reached,
+	 * below check is valid for only single queue per block
+	 */
+	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
+			>= QDMA_QUEUE_CR_WM) {
+		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+		return -1;
+	}
+#endif
+	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
+		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
+		return -1;
+	}
 
 	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
 		   QDMA_SGF_SRC_OFF;
@@ -512,19 +521,14 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 	qdma_csgf_set_len(csgf_dest, len);
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->virt_head++;
 	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
-		fsl_queue->virt_head = fsl_queue->cq;
+	if (fsl_queue->ci == fsl_queue->n_cq)
 		fsl_queue->ci = 0;
-	}
-
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI,
+			       block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
@@ -618,12 +622,9 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	void *block = fsl_queue->queue_base;
-	u32 reg;
 
 	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -656,44 +657,43 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			 enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *status = fsl_qdma->status_base;
+	int intr;
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, st);
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+#endif
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			fsl_queue->stats.errors++;
+		}
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						st);
-	fsl_queue->stats.completed += intr;
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
-	return intr;
+	return ret;
 }
 
 
@@ -703,44 +703,46 @@ dpaa_qdma_dequeue(void *dev_private,
 		  uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	void *status = fsl_qdma->status_base;
+	int intr;
+#endif
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
+	*has_error = false;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, NULL);
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			intr = qdma_readl(status + FSL_QDMA_DEDR);
+			*has_error = true;
+			fsl_queue->stats.errors++;
+		}
 	}
-
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						NULL);
-	fsl_queue->stats.completed += intr;
+#endif
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
-	return intr;
+	return ret;
 }
 
 static int
@@ -842,7 +844,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	}
 
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
 		if (!fsl_qdma->status[i])
 			goto mem_free;
 		j = 0;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 80366ce890..8a4517a70a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -58,11 +58,17 @@
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
+/* Update the value appropriately whenever QDMA_QUEUE_SIZE
+ * changes.
+ */
+#define FSL_QDMA_BCQMR_EI		0x20c0
+
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
 #define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -110,6 +116,7 @@
 #define QDMA_SGF_SRC_OFF		2
 #define QDMA_SGF_DST_OFF		3
 #define QDMA_DESC_OFF			1
+#define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 20/30] dma/dpaa: refactor driver
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (17 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 19/30] dma/dpaa: data path optimization Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
                   ` (10 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Return the completed descriptor index instead of the total
completion counter from the completion callbacks.
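
A minimal sketch of the resulting index arithmetic (names are
illustrative, not taken from the driver): the per-queue 'complete'
position walks the descriptor ring, and the reported last index is the
previous slot with wrap-around:

/* Sketch only: last completed ring index, given the next-to-complete
 * position 'complete' in a ring of n_cq descriptors.
 */
static inline uint16_t
last_completed_idx(uint16_t complete, uint16_t n_cq)
{
	return complete ? (uint16_t)(complete - 1) : (uint16_t)(n_cq - 1);
}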

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 534 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h | 109 +++----
 2 files changed, 330 insertions(+), 313 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 4022ad6469..dc17aa4520 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -15,19 +15,6 @@ qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
-{
-	return ccdf->cfg8b_w1 & 0xff;
-}
-
-static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
-
 static inline void
 qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
 {
@@ -59,8 +46,7 @@ qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
 	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
 }
 
-static inline int
-ilog2(int x)
+static inline int ilog2(int x)
 {
 	int log = 0;
 
@@ -73,32 +59,50 @@ ilog2(int x)
 	return log;
 }
 
-static u32
+static inline int ilog2_qsize(uint32_t q_size)
+{
+	return (ilog2(q_size) - ilog2(64));
+}
+
+static inline int ilog2_qthld(uint32_t q_thld)
+{
+	return (ilog2(q_thld) - ilog2(16));
+}
+
+static inline int
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
+{
+	struct rte_dma_stats *stats = &fsl_queue->stats;
+
+	return (stats->submitted - stats->completed);
+}
+
+static uint32_t
 qdma_readl(void *addr)
 {
 	return QDMA_IN(addr);
 }
 
 static void
-qdma_writel(u32 val, void *addr)
+qdma_writel(uint32_t val, void *addr)
 {
 	QDMA_OUT(addr, val);
 }
 
-static u32
+static uint32_t
 qdma_readl_be(void *addr)
 {
 	return QDMA_IN_BE(addr);
 }
 
 static void
-qdma_writel_be(u32 val, void *addr)
+qdma_writel_be(uint32_t val, void *addr)
 {
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
@@ -115,47 +119,48 @@ static void
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
 	struct fsl_qdma_format *ccdf;
-	int i, j;
+	uint16_t i, j;
 	struct fsl_qdma_format *head;
+	struct fsl_qdma_cmpd_ft *ft;
+	struct fsl_qdma_df *df;
 
-	head = queue->virt_head;
+	head = queue->cmd_desc;
 
-	for (i = 0; i < (int)(queue->n_cq); i++) {
-		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->virt_addr[i] =
-		dma_pool_alloc(size, aligned, &bus_addr);
-		if (!queue->virt_addr[i])
+		queue->ft[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
+				RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
 			goto fail;
 
-		queue->desc_virt_addr[i] =
-		dma_pool_alloc(size, aligned, &desc_bus_addr);
-		if (!queue->desc_virt_addr[i]) {
-			rte_free(queue->virt_addr[i]);
+		queue->df[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_df),
+				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (!queue->df[i]) {
+			rte_free(queue->ft[i]);
 			goto fail;
 		}
 
-		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(queue->desc_virt_addr[i], 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
+		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
 
-		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
-			    QDMA_DESC_OFF;
-		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
-		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
+		ft = queue->ft[i];
+		df = queue->df[i];
+		sdf = &df->sdf;
+		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
 
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		qdma_csgf_set_len(&ft->desc_buf,
+			sizeof(struct fsl_qdma_df));
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
@@ -167,73 +172,72 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		ccdf = (struct fsl_qdma_format *)queue->virt_head;
-		qdma_desc_addr_set64(ccdf, bus_addr + 16);
-		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
-		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
-		queue->virt_head++;
+		ccdf = queue->cmd_desc;
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		qdma_ccdf_set_format(ccdf, 0);
+		qdma_ccdf_set_ser(ccdf, 0);
+		queue->cmd_desc++;
 	}
-	queue->virt_head = head;
+	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
 
 fail:
 	for (j = 0; j < i; j++) {
-		rte_free(queue->virt_addr[j]);
-		rte_free(queue->desc_virt_addr[j]);
+		rte_free(queue->ft[j]);
+		rte_free(queue->df[j]);
 	}
 
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
+static struct fsl_qdma_queue *
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
 	struct fsl_qdma_queue *queue_temp;
+	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	queue_temp = rte_zmalloc("qdma: queue head",
+		sizeof(struct fsl_qdma_queue), 0);
 	if (!queue_temp) {
-		printf("no memory to allocate queues\n");
+		DPAA_QDMA_ERR("no memory to allocate queues\n");
 		return NULL;
 	}
+	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq =
-	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE,
-		       sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
-
+	queue_temp->cq = dma_pool_alloc(queue_size,
+		queue_size, &queue_temp->bus_addr);
 	if (!queue_temp->cq) {
 		rte_free(queue_temp);
 		return NULL;
 	}
 
-	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
-	       sizeof(struct fsl_qdma_format));
+	memset(queue_temp->cq, 0x0, queue_size);
 
-	queue_temp->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->id = k;
+	queue_temp->queue_id = queue_id;
 	queue_temp->pending = 0;
-	queue_temp->virt_head = queue_temp->cq;
-	queue_temp->virt_addr = rte_malloc("queue virt addr",
+	queue_temp->cmd_desc = queue_temp->cq;
+	queue_temp->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->virt_addr) {
+	if (!queue_temp->ft) {
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+	queue_temp->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->desc_virt_addr) {
-		rte_free(queue_temp->virt_addr);
+	if (!queue_temp->df) {
+		rte_free(queue_temp->ft);
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->stats = (struct rte_dma_stats){0};
+	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
 
 	return queue_temp;
 }
@@ -241,45 +245,43 @@ static struct fsl_qdma_queue
 static void
 fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->desc_virt_addr);
-	rte_free(queue->virt_addr);
+	rte_free(queue->df);
+	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
-					   u32 id)
+static struct fsl_qdma_queue *
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
 	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
+	status_head = rte_zmalloc("qdma: status head",
+		sizeof(*status_head), 0);
 	if (!status_head)
 		return NULL;
 
 	/*
 	 * Buffer for queue command
 	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
+	status_head->cq = dma_pool_alloc(status_size,
+		status_size, &status_head->bus_addr);
 
 	if (!status_head->cq) {
 		rte_free(status_head);
 		return NULL;
 	}
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
+	memset(status_head->cq, 0x0, status_size);
 	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
-	status_head->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+	status_head->cmd_desc = status_head->cq;
+	status_head->block_id = block_id;
+	status_head->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
 	return status_head;
 }
@@ -294,11 +296,11 @@ fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
-	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
+	uint8_t *ctrl = fsl_qdma->ctrl_base;
+	uint8_t *block;
 	int i, count = RETRIES;
 	unsigned int j;
-	u32 reg;
+	uint32_t reg;
 
 	/* Disable the command queue and wait for idle state. */
 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
@@ -337,11 +339,13 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
-				 enum rte_dma_status_code *status)
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls,
+	enum rte_dma_status_code *status)
 {
-	u32 reg;
+	uint32_t reg;
 	int count = 0;
+	uint8_t *block = fsl_queue->block_vir;
 
 	while (count < nb_cpls) {
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
@@ -351,9 +355,11 @@ fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
+		fsl_queue->complete++;
+		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
+			fsl_queue->complete = 0;
 
 		count++;
-
 	}
 	return count;
 }
@@ -363,9 +369,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
-	u32 i, j;
-	u32 reg;
+	uint8_t *block;
+	uint32_t i, j, k = 0;
+	uint32_t reg;
 	int ret, val;
 
 	/* Try to halt the qDMA engine first. */
@@ -375,13 +381,11 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
-	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		k = 0;
-		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
-			temp = fsl_qdma->queue[i];
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = fsl_qdma->queue[k];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -391,19 +395,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(k));
+				    block + FSL_QDMA_BCQDPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(k));
+				    block + FSL_QDMA_BCQEDPA_SADDR(i));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEEPA_SADDR(i));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 			k++;
 		}
 
@@ -423,18 +428,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp = fsl_qdma->status[j];
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -443,7 +445,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -455,52 +457,51 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-
 static int
 fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-				  uint64_t flags, dma_addr_t dst,
-				  dma_addr_t src, size_t len)
+	uint64_t flags, dma_addr_t dst,
+	dma_addr_t src, size_t len)
 {
-	void *block = fsl_queue->queue_base;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
+	uint32_t cfg = 0;
 #endif
 
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	u32 reg;
+	uint32_t reg;
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
 		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -1;
+		return -EBUSY;
 	}
 #else
 	/* check whether critical watermark level reached,
 	 * below check is valid for only single queue per block
 	 */
-	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
-			>= QDMA_QUEUE_CR_WM) {
+	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
 		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
-		return -1;
+		return -ENOSPC;
 	}
 #endif
+
 	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
 		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -1;
+		return -ENOSPC;
 	}
 
-	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		   QDMA_SGF_SRC_OFF;
-	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		    QDMA_SGF_DST_OFF;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf = fsl_queue->df[fsl_queue->ci];
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			FSL_QDMA_CMD_RWTTYPE_OFFSET);
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
@@ -527,67 +528,57 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->ci = 0;
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI,
-			       block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
 	}
-	return 0;
+
+	if (fsl_queue->ci)
+		return fsl_queue->ci - 1;
+	else
+		return fsl_queue->n_cq;
 }
 
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+	__rte_unused uint32_t info_sz)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 4;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
 {
-	u32 i;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue;
-
-	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
-		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
-		return -1;
-	}
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	i = fsl_qdma->free_block_id * QDMA_QUEUES;
+	if (fsl_queue->used)
+		return 0;
 
-	fsl_queue = fsl_qdma->queue[i];
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
 	if (ret)
 		return ret;
 
-	fsl_qdma->vchan_map[vchan] = i;
-	fsl_qdma->free_block_id++;
+	fsl_queue->used = 1;
+
 	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -606,9 +597,9 @@ dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
 
 static int
 dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
-		      uint16_t vchan,
-		      __rte_unused const struct rte_dma_vchan_conf *conf,
-		      __rte_unused uint32_t conf_sz)
+	uint16_t vchan,
+	__rte_unused const struct rte_dma_vchan_conf *conf,
+	__rte_unused uint32_t conf_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
 
@@ -618,13 +609,13 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -634,37 +625,31 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	int ret, idx;
-
-	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
-	if (ret < 0)
-		return ret;
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
 
-	return idx;
+	return ret;
 }
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, st);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, st);
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -690,8 +675,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	}
 
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
 
 	return ret;
 }
@@ -699,21 +688,20 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, NULL);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, NULL);
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -740,23 +728,27 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 #endif
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
+
 	return ret;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
 		return -EINVAL;
-	if (rte_stats == NULL)
+	if (!rte_stats)
 		return -EINVAL;
 
 	*rte_stats = *stats;
@@ -768,10 +760,9 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
@@ -779,9 +770,8 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
-	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -804,8 +794,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i, k = 0;
-	int j;
+	uint32_t i, j, k;
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
@@ -814,47 +803,63 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		return -1;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d\n", phys_addr, regs_size);
-		return -1;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIu64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
-	if (!fsl_qdma->status)
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	fsl_qdma->status = rte_malloc("status queue",
+		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
+	if (!fsl_qdma->status) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
-	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
+	fsl_qdma->queue = rte_malloc("cmd queue",
+		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
 	if (!fsl_qdma->queue) {
 		rte_free(fsl_qdma->status);
+		ret = -ENOMEM;
 		goto err;
 	}
 
+	k = 0;
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i])
+		fsl_qdma->status[i] =
+			fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (!fsl_qdma->status[i]) {
+			ret = -ENOMEM;
 			goto mem_free;
+		}
 		j = 0;
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
-			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k])
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_qdma->queue[k] =
+				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k]) {
+				ret = -ENOMEM;
 				goto mem_free;
-			j++;
+			}
+			k++;
 		}
-
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
@@ -867,17 +872,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	return 0;
 
 mem_free:
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
-	rte_free(fsl_qdma->status);
+	for (j = 0; j < k; j++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+
+	for (j = 0; j < i; j++)
+		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+
+	if (fsl_qdma->status)
+		rte_free(fsl_qdma->status);
 err:
-	rte_free(fsl_qdma->queue);
+	if (fsl_qdma->queue)
+		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
-	return -1;
+	return ret;
 }
 
 static int
@@ -921,13 +929,13 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i, k;
+	uint32_t i;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
 		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
+
+	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
 
 	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8a4517a70a..25954ef3a4 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,12 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,19 +59,18 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 /* Update the value appropriately whenever QDMA_QUEUE_SIZE
  * changes.
  */
-#define FSL_QDMA_BCQMR_EI		0x20c0
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
 #define FSL_QDMA_BSQMR_DI		0xc0
 
@@ -75,8 +79,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -106,16 +108,11 @@
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
+#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
 #define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
 #define QDMA_BLOCK_OFFSET		0x10000
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
-#define QDMA_SGF_SRC_OFF		2
-#define QDMA_SGF_DST_OFF		3
-#define QDMA_DESC_OFF			1
 #define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
@@ -134,64 +131,76 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
 struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
+	uint32_t status; /* ser, status */
+	uint32_t cfg;	/* format, offset */
 	union {
 		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
+			uint32_t addr_lo; /* low 32-bits of 40-bit address */
+			uint8_t addr_hi; /* high 8-bits of 40-bit address */
+			uint8_t __reserved1[2];
+			uint8_t cfg8b_w1; /* dd, queue */
 		};
-		__le64 data;
+		uint64_t data;
 	};
 };
 
 /* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
+	uint32_t rev3;
+	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	uint32_t rev5;
+	uint32_t cmd;
 };
 
 /* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rev1;
+	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	uint32_t rev3;
+	uint32_t cmd;
+};
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
+};
+
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_format desc_buf;
+	struct fsl_qdma_format desc_sbuf;
+	struct fsl_qdma_format desc_dbuf;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	void                    **virt_addr;
-	u8			ci;
-	u8			n_cq;
-	u8			id;
-	void			*queue_base;
-	struct fsl_qdma_format	*cq;
-	struct rte_dma_stats	stats;
-	u8			pending;
-	dma_addr_t		bus_addr;
-	void			**desc_virt_addr;
+	struct fsl_qdma_format *cmd_desc;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	uint16_t complete;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	uint8_t pending;
+	dma_addr_t bus_addr;
+	struct fsl_qdma_df **df;
 };
 
 struct fsl_qdma_engine {
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_queues;
-	struct fsl_qdma_queue	**queue;
-	struct fsl_qdma_queue	**status;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	struct fsl_qdma_queue **queue;
+	struct fsl_qdma_queue **status;
+	uint32_t num_blocks;
+	int block_offset;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 21/30] dma/dpaa: dequeue status queue
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (18 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 20/30] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
                   ` (9 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

To support multiple command queues per block, the status queue needs to
report which command queue of a block has completed.

The command queues are balanced across the blocks at setup.
If multiple command queues are enabled in one block,
the status queue is dequeued instead of checking completion directly.

Dequeue operations are not performed in silent mode.
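
Illustration only (not part of this patch): a minimal C sketch of the
per-block status-queue drain, assuming a simplified entry type that records
the originating command queue index:

	#include <stdint.h>

	struct stat_entry { uint64_t addr; uint8_t queue; };

	static uint16_t
	drain_status(struct stat_entry *cq, uint16_t n_cq, uint16_t *start,
		     uint32_t *completed_per_queue)
	{
		uint16_t count = 0, idx = *start;

		/* A non-zero address marks a valid completion entry. */
		while (cq[idx].addr) {
			/* Credit the command queue owning this completion. */
			completed_per_queue[cq[idx].queue]++;
			cq[idx].addr = 0;		/* consume the entry */
			idx = (idx + 1) % n_cq;		/* circular status queue */
			count++;
		}
		*start = idx;
		return count;
	}

The driver's actual logic is in qdma_ccdf_get_queue() and
dpaa_qdma_block_dequeue() below.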

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 351 +++++++++++++++++++++--------------
 drivers/dma/dpaa/dpaa_qdma.h |  38 +++-
 2 files changed, 242 insertions(+), 147 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index dc17aa4520..646542eb8f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -34,6 +34,30 @@ qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
 	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
 }
 
+static inline void
+qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
+	uint8_t queue_idx)
+{
+	ccdf->queue = queue_idx;
+}
+
+static inline int
+qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+	uint8_t *queue_idx)
+{
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
+
+	return false;
+}
+
 static inline void
 qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
 {
@@ -110,7 +134,8 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 	if (!virt_addr)
 		return NULL;
 
-	*phy_addr = rte_mem_virt2iova(virt_addr);
+	if (phy_addr)
+		*phy_addr = rte_mem_virt2iova(virt_addr);
 
 	return virt_addr;
 }
@@ -121,6 +146,7 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *ccdf;
@@ -175,7 +201,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = queue->cmd_desc;
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		qdma_ccdf_set_format(ccdf, 0);
-		qdma_ccdf_set_ser(ccdf, 0);
+		if (!fsl_qdma->is_slient)
+			qdma_ccdf_set_ser(ccdf, 0);
+		qdma_ccdf_set_queue(ccdf, queue->queue_id);
 		queue->cmd_desc++;
 	}
 	queue->cmd_desc = head;
@@ -192,105 +220,91 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue *
+static int
 fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head",
-		sizeof(struct fsl_qdma_queue), 0);
-	if (!queue_temp) {
-		DPAA_QDMA_ERR("no memory to allocate queues\n");
-		return NULL;
-	}
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
 	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq = dma_pool_alloc(queue_size,
-		queue_size, &queue_temp->bus_addr);
-	if (!queue_temp->cq) {
-		rte_free(queue_temp);
-		return NULL;
-	}
+	cmd_queue->cq = dma_pool_alloc(queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq)
+		return -ENOMEM;
 
-	memset(queue_temp->cq, 0x0, queue_size);
+	memset(cmd_queue->cq, 0x0, queue_size);
 
-	queue_temp->block_vir = fsl_qdma->block_base +
+	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
-	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->queue_id = queue_id;
-	queue_temp->pending = 0;
-	queue_temp->cmd_desc = queue_temp->cq;
-	queue_temp->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending = 0;
+	cmd_queue->cmd_desc = cmd_queue->cq;
+	cmd_queue->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->ft) {
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->ft) {
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	queue_temp->df = rte_malloc("Descriptor Buffer",
+	cmd_queue->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->df) {
-		rte_free(queue_temp->ft);
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->df) {
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
 
-	return queue_temp;
+	return 0;
 }
 
 static void
-fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
-	rte_free(queue);
 }
 
-static struct fsl_qdma_queue *
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
+}
+
+
+static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
+	struct fsl_qdma_status_queue *status;
 	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head",
-		sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(status_size,
-		status_size, &status_head->bus_addr);
+	status->cq = dma_pool_alloc(status_size,
+		status_size, &status->bus_addr);
 
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size);
-	status_head->n_cq = status_size;
-	status_head->cmd_desc = status_head->cq;
-	status_head->block_id = block_id;
-	status_head->block_vir = fsl_qdma->block_base +
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
-}
-
-static void
-fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
-{
-	rte_free(status->cq);
-	rte_free(status);
+	return 0;
 }
 
 static int
@@ -358,6 +372,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->complete++;
 		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
 			fsl_queue->complete = 0;
+		fsl_queue->stats.completed++;
 
 		count++;
 	}
@@ -368,9 +383,10 @@ static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	uint8_t *block;
-	uint32_t i, j, k = 0;
+	uint32_t i, j;
 	uint32_t reg;
 	int ret, val;
 
@@ -385,7 +401,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
 		for (i = 0; i < QDMA_QUEUES; i++) {
-			temp = fsl_qdma->queue[k];
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -409,7 +425,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
 			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
-			k++;
 		}
 
 		/*
@@ -419,7 +434,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 */
 
 		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
-			    block + FSL_QDMA_SQCCMR);
+			block + FSL_QDMA_SQCCMR);
 
 		/*
 		 * Initialize status queue registers to point to the first
@@ -428,14 +443,14 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		temp = fsl_qdma->status[j];
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEEPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEPAR);
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEDPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
@@ -445,7 +460,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2_qsize(temp->n_cq);
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -560,8 +575,29 @@ static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 	uint16_t vchan)
 {
-	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
+		}
+	}
+
+queue_found:
+	if (!found)
+		return -ENXIO;
 
 	if (fsl_queue->used)
 		return 0;
@@ -571,15 +607,19 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 		return ret;
 
 	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
 
 	return 0;
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_slient = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -610,7 +650,7 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
@@ -629,7 +669,7 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	int ret;
 
 	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
@@ -637,6 +677,42 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_format *cq;
+	uint16_t start, count = 0;
+	uint8_t qid;
+	int ret;
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	do {
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+			cmd_queue->complete++;
+			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
+				cmd_queue->complete = 0;
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
@@ -644,12 +720,22 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, st);
+	if (unlikely(fsl_qdma->is_slient)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+		return 0;
+	}
+
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, st);
+	}
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -674,7 +760,6 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		}
 	}
 
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -693,15 +778,26 @@ dpaa_qdma_dequeue(void *dev_private,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
+	if (unlikely(fsl_qdma->is_slient)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+
+		return 0;
+	}
+
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, NULL);
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, NULL);
+	}
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -727,7 +823,6 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 #endif
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -743,7 +838,7 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
 	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -760,7 +855,7 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
@@ -771,7 +866,7 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -827,37 +922,19 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	fsl_qdma->block_base =
 		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
 
-	fsl_qdma->status = rte_malloc("status queue",
-		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
-	if (!fsl_qdma->status) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	fsl_qdma->queue = rte_malloc("cmd queue",
-		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
-	if (!fsl_qdma->queue) {
-		rte_free(fsl_qdma->status);
-		ret = -ENOMEM;
-		goto err;
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
 	k = 0;
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] =
-			fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i]) {
-			ret = -ENOMEM;
-			goto mem_free;
-		}
-		j = 0;
-		for (j = 0; j < QDMA_QUEUES; j++) {
-			fsl_qdma->queue[k] =
-				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k]) {
-				ret = -ENOMEM;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
 				goto mem_free;
-			}
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
 			k++;
 		}
 	}
@@ -865,24 +942,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		rte_free(fsl_qdma->status);
 		goto mem_free;
 	}
 
 	return 0;
 
 mem_free:
-	for (j = 0; j < k; j++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	for (j = 0; j < i; j++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	if (fsl_qdma->status)
-		rte_free(fsl_qdma->status);
-err:
-	if (fsl_qdma->queue)
-		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return ret;
@@ -929,16 +1002,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i;
+	uint32_t i, j, regs_size;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	rte_free(fsl_qdma->queue);
-	rte_free(fsl_qdma->status);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
+
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 25954ef3a4..a767da0a3f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,13 +107,13 @@
 #define COMMAND_QUEUE_OVERFLOW		10
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
-#define QDMA_CCSR_BASE			0x8380000
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_QUEUE_CR_WM		32
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -140,7 +140,9 @@ struct fsl_qdma_format {
 			uint32_t addr_lo; /* low 32-bits of 40-bit address */
 			uint8_t addr_hi; /* high 8-bits of 40-bit address */
 			uint8_t __reserved1[2];
-			uint8_t cfg8b_w1; /* dd, queue */
+			uint8_t queue:3;
+			uint8_t rsv:3;
+			uint8_t dd:2;
 		};
 		uint64_t data;
 	};
@@ -182,6 +184,7 @@ struct fsl_qdma_queue {
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
+	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
 	struct fsl_qdma_format *cq;
@@ -189,6 +192,18 @@ struct fsl_qdma_queue {
 	uint8_t pending;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
+	void *engine;
+};
+
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
@@ -197,10 +212,13 @@ struct fsl_qdma_engine {
 	void *status_base;
 	void *block_base;
 	uint32_t n_queues;
-	struct fsl_qdma_queue **queue;
-	struct fsl_qdma_queue **status;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_slient;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 22/30] dma/dpaa: add Scatter Gather support
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (19 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 23/30] dma/dpaa: block dequeue Gagandeep Singh
                   ` (8 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform scatter-gather (SG) operations through the copy_sg callback of the
DMA library or via a burst request from the application.
Fall back to a simple copy operation when the burst size is 1.
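
A hedged application-side usage sketch (not part of this patch): an SG copy
issued through the generic dmadev API, with equal-length source/destination
pairs as this driver requires; the helper name is illustrative:

	#include <rte_dmadev.h>

	/* Submit one scatter-gather copy of n equal-length segments. */
	static int
	submit_sg_copy(int16_t dev_id, uint16_t vchan,
		       struct rte_dma_sge *src, struct rte_dma_sge *dst,
		       uint16_t n)
	{
		return rte_dma_copy_sg(dev_id, vchan, src, dst, n, n,
				       RTE_DMA_OP_FLAG_SUBMIT);
	}

With n == 1 the PMD takes the simple-copy path instead of building an SG
frame table. Note that this PMD also derives per-job completion indexes from
the flags word (see DPAA_QDMA_IDXADDR_FROM_SG_FLAG in the patch), which the
sketch above leaves out.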

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 856 ++++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h | 184 +++++---
 2 files changed, 763 insertions(+), 277 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 646542eb8f..75e92a1b0c 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,45 +4,31 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_pmd_dpaax_qdma.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static int s_data_validation;
+static int s_hw_err_check;
+static int s_sg_disable;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
-}
-
-static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
-
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
-}
-
-static inline void
-qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
-	uint8_t queue_idx)
-{
-	ccdf->queue = queue_idx;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
 	uint8_t *queue_idx)
 {
 	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
@@ -58,18 +44,6 @@ qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
 	return false;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
-}
-
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
-}
-
 static inline int ilog2(int x)
 {
 	int log = 0;
@@ -126,11 +100,11 @@ qdma_writel_be(uint32_t val, void *addr)
 }
 
 static void *
-dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -149,28 +123,46 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *ccdf;
+	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
-	struct fsl_qdma_format *head;
 	struct fsl_qdma_cmpd_ft *ft;
 	struct fsl_qdma_df *df;
 
-	head = queue->cmd_desc;
-
 	for (i = 0; i < queue->n_cq; i++) {
 		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->ft[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
-				RTE_CACHE_LINE_SIZE, &phy_ft);
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
 		if (!queue->ft[i])
 			goto fail;
-
-		queue->df[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_df),
-				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+
+		queue->df[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_df),
+			RTE_CACHE_LINE_SIZE, &phy_df);
 		if (!queue->df[i]) {
 			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
 
@@ -182,31 +174,25 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		sdf = &df->sdf;
 		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
-
+		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(&ft->desc_buf,
-			sizeof(struct fsl_qdma_df));
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+		sdf->prefetch = 1;
 #endif
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
 
-		ccdf = queue->cmd_desc;
+		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
-		qdma_ccdf_set_format(ccdf, 0);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
 		if (!fsl_qdma->is_slient)
-			qdma_ccdf_set_ser(ccdf, 0);
-		qdma_ccdf_set_queue(ccdf, queue->queue_id);
-		queue->cmd_desc++;
+			ccdf->ser = 1;
+		ccdf->queue = queue->queue_id;
 	}
-	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
@@ -226,40 +212,107 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
 
 	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
 	cmd_queue->engine = fsl_qdma;
 
-	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
 
-	cmd_queue->cq = dma_pool_alloc(queue_size,
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
 		queue_size, &cmd_queue->bus_addr);
-	if (!cmd_queue->cq)
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
 		return -ENOMEM;
-
-	memset(cmd_queue->cq, 0x0, queue_size);
+	}
 
 	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
 	cmd_queue->queue_id = queue_id;
 	cmd_queue->block_id = block_id;
-	cmd_queue->pending = 0;
-	cmd_queue->cmd_desc = cmd_queue->cq;
-	cmd_queue->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	cmd_queue->df = rte_malloc("Descriptor Buffer",
+	sprintf(nm, "Descriptor Buf_%d_%d",
+		block_id, queue_id);
+	cmd_queue->df = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->df) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->df);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+
 	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
 	return 0;
 }
@@ -270,6 +323,10 @@ fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
 }
 
 static void
@@ -289,9 +346,10 @@ fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	status = &fsl_qdma->stat_queues[block_id];
 	status->engine = fsl_qdma;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	status->cq = dma_pool_alloc(status_size,
+	status->cq = dma_pool_alloc(NULL, status_size,
 		status_size, &status->bus_addr);
 
 	if (!status->cq)
@@ -352,31 +410,116 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
+{
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+	if (likely(!s_data_validation))
+		return;
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRId64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!\r\n",
+					err_msg);
+				return;
+			}
+		}
+	}
+}
+
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls,
-	enum rte_dma_status_code *status)
+fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
 {
 	uint32_t reg;
-	int count = 0;
+	int count = 0, ret;
 	uint8_t *block = fsl_queue->block_vir;
+	uint16_t *dq_complete, drain_num = 0;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	while (count < nb_cpls) {
+	while (1) {
+		if (rte_ring_free_count(fsl_queue->complete_pool) <
+			(FSL_QDMA_SG_MAX_ENTRY * 2))
+			break;
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
+			break;
 
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-		fsl_queue->complete++;
-		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
-			fsl_queue->complete = 0;
+		ret = rte_ring_dequeue(fsl_queue->complete_burst,
+			(void **)&dq_complete);
+		if (ret)
+			DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
+			(void **)desc, *dq_complete, NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
+
+		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
+			(void **)desc, (*dq_complete), NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		drain_num += *dq_complete;
+		fsl_queue->complete_start =
+			(fsl_queue->complete_start + (*dq_complete)) &
+			(fsl_queue->pending_max - 1);
 		fsl_queue->stats.completed++;
 
 		count++;
 	}
-	return count;
+
+	return drain_num;
+}
+
+static int
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *status)
+{
+	int ret;
+	uint16_t dq_num = 0, i;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+
+	ret = fsl_qdma_queue_drain(fsl_queue);
+	if (ret < 0) {
+		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			ret);
+	}
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+		(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (status) {
+		for (i = 0; i < dq_num; i++)
+			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
+	}
+
+	return dq_num;
 }
 
 static int
@@ -473,87 +616,253 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-	uint64_t flags, dma_addr_t dst,
-	dma_addr_t src, size_t len)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start;
+	int ret;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
 	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	uint32_t cfg = 0;
-#endif
+	int ret;
 
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	uint32_t reg;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0, drain;
+	uint32_t reg, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
+	check_num = 0;
+overflow_check:
+	if (unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
-		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -EBUSY;
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	drain = fsl_qdma_queue_drain(fsl_queue);
+	if (drain <= 0) {
+		drain_num++;
+		if (drain_num > 100) {
+			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			return -ENOSPC;
+		}
+		goto drain_again;
 	}
-#else
-	/* check whether critical watermark level reached,
-	 * below check is valid for only single queue per block
-	 */
-	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
-		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+	check_num++;
+	if (check_num > 10) {
+		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
 		return -ENOSPC;
 	}
+	goto overflow_check;
+
+	return -ENOSPC;
+}
+
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint8_t *block = fsl_queue->block_vir, i;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len = 0;
+	uint8_t num = fsl_queue->pending_num;
+	uint16_t start = fsl_queue->pending_start, idx;
+	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
 #endif
 
-	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
-		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -ENOSPC;
-	}
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = fsl_queue->df[fsl_queue->ci];
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+	sdf->prefetch = 1;
 #endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
 #endif
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->ci++;
 
-	if (fsl_queue->ci == fsl_queue->n_cq)
-		fsl_queue->ci = 0;
+	if (num == 1) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+		if (fsl_queue->pending_desc[start].len >
+			FSL_QDMA_CMD_SSS_DISTANCE) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+		} else {
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+#endif
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (s_sg_disable) {
+		while (fsl_queue->pending_num > 0) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+			if (fsl_queue->pending_desc[start].len >
+				FSL_QDMA_CMD_SSS_DISTANCE) {
+				sdf->ssen = 1;
+				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+			} else {
+				sdf->sss = 0;
+				sdf->ssd = 0;
+			}
+#endif
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
 
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->stats.submitted++;
+		return 0;
+	}
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+	}
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
 	} else {
-		fsl_queue->pending++;
+		sdf->sss = 0;
+		sdf->ssd = 0;
 	}
+#endif
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-	if (fsl_queue->ci)
-		return fsl_queue->ci - 1;
-	else
-		return fsl_queue->n_cq;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num = 0;
+
+	return 0;
 }
 
 static int
@@ -564,8 +873,9 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
-	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
@@ -651,16 +961,11 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-	void *block = fsl_queue->block_vir;
 
-	while (fsl_queue->pending) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
@@ -670,9 +975,86 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
+	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
+
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
@@ -683,7 +1065,7 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_status_queue *stat_queue;
 	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	uint16_t start, count = 0;
 	uint8_t qid;
 	int ret;
@@ -697,9 +1079,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
 			cmd_queue->stats.completed++;
-			cmd_queue->complete++;
-			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
-				cmd_queue->complete = 0;
 			start++;
 			if (unlikely(start == stat_queue->n_cq))
 				start = 0;
@@ -713,16 +1092,81 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:\n");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)\n");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)\n");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)\n");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)\n");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)\n");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)\n");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)\n");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:\n");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x\n",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x\n",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d\n",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
-	int intr;
 
 	if (unlikely(fsl_qdma->is_slient)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -734,55 +1178,27 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, st);
+				nb_cpls, last_idx, st);
 	}
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-#endif
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
 			fsl_queue->stats.errors++;
-		}
-	}
-
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
 	}
 
 	return ret;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
-	int intr;
-#endif
 
 	if (unlikely(fsl_qdma->is_slient)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -796,39 +1212,17 @@ dpaa_qdma_dequeue(void *dev_private,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, NULL);
+				nb_cpls, last_idx, NULL);
 	}
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
-			intr = qdma_readl(status + FSL_QDMA_DEDR);
-			*has_error = true;
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
 			fsl_queue->stats.errors++;
 		}
 	}
-#endif
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
-	}
 
 	return ret;
 }
@@ -868,7 +1262,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	return fsl_queue->n_cq - fsl_queue->pending;
+	return fsl_queue->pending_max - fsl_queue->pending_num;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
@@ -891,6 +1285,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
+		s_data_validation = 1;
+
+	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
+		s_hw_err_check = 1;
+
+	if (getenv("DPAA_QDMA_SG_DISABLE"))
+		s_sg_disable = 1;
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -981,6 +1384,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index a767da0a3f..f4535af3dd 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -83,29 +83,15 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
-#define FSL_QDMA_CMD_PF			BIT(17)
-
-#define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
 #define FSL_QDMA_CMD_SSS_STRIDE		128
 #define FSL_QDMA_CMD_SSS_DISTANCE	128
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
 #define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
@@ -132,64 +118,160 @@
 	(((fsl_qdma_engine)->block_offset) * (x))
 
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	uint32_t status; /* ser, status */
-	uint32_t cfg;	/* format, offset */
-	union {
-		struct {
-			uint32_t addr_lo; /* low 32-bits of 40-bit address */
-			uint8_t addr_hi; /* high 8-bits of 40-bit address */
-			uint8_t __reserved1[2];
-			uint8_t queue:3;
-			uint8_t rsv:3;
-			uint8_t dd:2;
-		};
-		uint64_t data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	uint32_t rev3;
-	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	uint32_t rev5;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	uint32_t rev1;
-	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	uint32_t rev3;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
 
 struct fsl_qdma_df {
 	struct fsl_qdma_sdf sdf;
 	struct fsl_qdma_ddf ddf;
 };
 
+#define FSL_QDMA_SG_MAX_ENTRY RTE_DPAAX_QDMA_JOB_SUBMIT_MAX
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
 struct fsl_qdma_cmpd_ft {
-	struct fsl_qdma_format desc_buf;
-	struct fsl_qdma_format desc_sbuf;
-	struct fsl_qdma_format desc_dbuf;
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+} __rte_packed;
+
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format *cmd_desc;
 	int used;
 	struct fsl_qdma_cmpd_ft **ft;
 	uint16_t ci;
-	uint16_t complete;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
 	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
 	struct rte_dma_stats stats;
-	uint8_t pending;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint8_t pending_num;
+	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
 	void *engine;
@@ -200,7 +282,7 @@ struct fsl_qdma_status_queue {
 	uint16_t complete;
 	uint8_t block_id;
 	void *block_vir;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	struct rte_dma_stats stats;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 23/30] dma/dpaa: block dequeue
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (20 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
                   ` (7 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform block dequeue to identify which queues of the block have completed jobs.
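
A minimal, self-contained model of the idea (the stat_entry type, the
block_dequeue() helper and the ring size here are illustrative, not the
driver's API; the real walk is dpaa_qdma_block_dequeue() in the diff
below): every entry of the block's status queue records the id of the
command queue it completed for, so one walk over the status ring serves
all queues of the block.

#include <stdint.h>

#define N_CQ 64	/* status ring size; power of two, as in the driver */

struct stat_entry { uint8_t qid; };

static uint16_t
block_dequeue(const struct stat_entry *cq, uint16_t *start,
	uint16_t valid, uint64_t *completed_per_queue)
{
	uint16_t count = 0;

	while (count < valid) {
		/* credit the command queue named by this status entry */
		completed_per_queue[cq[*start].qid]++;
		*start = (*start + 1) & (N_CQ - 1);	/* ring wrap */
		count++;
	}
	return count;
}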

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 252 ++++++++++++++++-------------------
 1 file changed, 116 insertions(+), 136 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 75e92a1b0c..32d8a5b2be 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -442,86 +442,6 @@ fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
 	}
 }
 
-static int
-fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
-{
-	uint32_t reg;
-	int count = 0, ret;
-	uint8_t *block = fsl_queue->block_vir;
-	uint16_t *dq_complete, drain_num = 0;
-	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
-
-	while (1) {
-		if (rte_ring_free_count(fsl_queue->complete_pool) <
-			(FSL_QDMA_SG_MAX_ENTRY * 2))
-			break;
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			break;
-
-		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		ret = rte_ring_dequeue(fsl_queue->complete_burst,
-			(void **)&dq_complete);
-		if (ret)
-			DPAA_QDMA_ERR("DQ desc number failed!\n");
-
-		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
-			(void **)desc, *dq_complete, NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
-
-		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
-			(void **)desc, (*dq_complete), NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		drain_num += *dq_complete;
-		fsl_queue->complete_start =
-			(fsl_queue->complete_start + (*dq_complete)) &
-			(fsl_queue->pending_max - 1);
-		fsl_queue->stats.completed++;
-
-		count++;
-	}
-
-	return drain_num;
-}
-
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls, uint16_t *last_idx,
-	enum rte_dma_status_code *status)
-{
-	int ret;
-	uint16_t dq_num = 0, i;
-	struct fsl_qdma_desc *desc_complete[nb_cpls];
-
-	ret = fsl_qdma_queue_drain(fsl_queue);
-	if (ret < 0) {
-		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			ret);
-	}
-
-	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
-		(void **)desc_complete, nb_cpls, NULL);
-	for (i = 0; i < dq_num; i++)
-		last_idx[i] = desc_complete[i]->flag;
-
-	if (status) {
-		for (i = 0; i < dq_num; i++)
-			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
-	}
-
-	return dq_num;
-}
-
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -682,13 +602,90 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
+
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
-	int overflow = 0, drain;
-	uint32_t reg, check_num, drain_num;
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
 	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
@@ -711,11 +708,12 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	drain_num = 0;
 
 drain_again:
-	drain = fsl_qdma_queue_drain(fsl_queue);
-	if (drain <= 0) {
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
 		drain_num++;
 		if (drain_num > 100) {
-			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
 			return -ENOSPC;
@@ -724,7 +722,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	}
 	check_num++;
 	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
@@ -1059,39 +1057,6 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
-static uint16_t
-dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
-	uint8_t block_id)
-{
-	struct fsl_qdma_status_queue *stat_queue;
-	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_comp_cmd_desc *cq;
-	uint16_t start, count = 0;
-	uint8_t qid;
-	int ret;
-
-	stat_queue = &fsl_qdma->stat_queues[block_id];
-	cq = stat_queue->cq;
-	start = stat_queue->complete;
-
-	do {
-		ret = qdma_ccdf_get_queue(&cq[start], &qid);
-		if (ret == true) {
-			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
-			start++;
-			if (unlikely(start == stat_queue->n_cq))
-				start = 0;
-			count++;
-		} else {
-			break;
-		}
-	} while (1);
-	stat_queue->complete = start;
-
-	return count;
-}
-
 static int
 dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
 {
@@ -1164,22 +1129,32 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_slient)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
 		return 0;
 	}
 
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, st);
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1187,7 +1162,7 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			fsl_queue->stats.errors++;
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static uint16_t
@@ -1196,9 +1171,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_slient)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -1207,13 +1184,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 
 	*has_error = false;
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, NULL);
-	}
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1224,7 +1204,7 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 24/30] dma/dpaa: improve congestion handling
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (21 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 23/30] dma/dpaa: block dequeue Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
                   ` (6 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Congestion occurs frequently on low-speed (PCIe) devices.
Drain the command queue so that DMA can make progress when congestion occurs.
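
A sketch of the drain-and-retry policy this patch adds. The
ring_enqueue_one() and drain_block() hooks are hypothetical stand-ins
for the driver's ring enqueue and the block dequeue of the previous
patch; the retry bound mirrors the dq_cnt limit in the diff below.

#include <errno.h>

extern int ring_enqueue_one(void);	/* 0 on success, -ENOBUFS when full */
extern int drain_block(void);		/* number of completions drained */

static int
enqueue_with_drain(void)
{
	int empty_drains = 0;

	while (ring_enqueue_one() == -ENOBUFS) {
		if (drain_block() > 0) {
			empty_drains = 0;	/* HW made progress, retry */
			continue;
		}
		if (++empty_drains > 100)
			return -ENOSPC;		/* give up: HW stalled */
	}
	return 0;
}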

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 157 +++++++++++++++++++----------------
 1 file changed, 85 insertions(+), 72 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 32d8a5b2be..93199cf9a3 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -535,73 +535,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
-{
-	uint16_t i, num = fsl_queue->pending_num, idx, start;
-	int ret;
-
-	num = is_burst ? fsl_queue->pending_num : 1;
-
-	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
-	ret = rte_ring_enqueue(fsl_queue->complete_burst,
-			&fsl_queue->desc_in_hw[fsl_queue->ci]);
-	if (ret) {
-		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
-			__func__);
-		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
-			__func__, fsl_queue->stats.submitted,
-			fsl_queue->stats.completed);
-		return ret;
-	}
-	start = fsl_queue->pending_start;
-	for (i = 0; i < num; i++) {
-		idx = (start + i) & (fsl_queue->pending_max - 1);
-		ret = rte_ring_enqueue(fsl_queue->complete_desc,
-				&fsl_queue->pending_desc[idx]);
-		if (ret) {
-			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int
-fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
-	dma_addr_t dst, dma_addr_t src, size_t len)
-{
-	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
-	struct fsl_qdma_cmpd_ft *ft;
-	int ret;
-
-	ft = fsl_queue->ft[fsl_queue->ci];
-	csgf_src = &ft->desc_sbuf;
-	csgf_dest = &ft->desc_dbuf;
-	qdma_desc_sge_addr_set64(csgf_src, src);
-	csgf_src->length = len;
-	csgf_src->extion = 0;
-	qdma_desc_sge_addr_set64(csgf_dest, dst);
-	csgf_dest->length = len;
-	csgf_dest->extion = 0;
-	/* This entry is the last entry. */
-	csgf_dest->final = 1;
-
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
-	if (ret)
-		return ret;
-	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
-
-	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-	fsl_queue->stats.submitted++;
-
-	return 0;
-}
-
 static uint16_t
 dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	uint8_t block_id)
@@ -633,7 +566,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		ret = qdma_ccdf_get_queue(&cq[start], &qid);
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
 
 			ret = rte_ring_dequeue(cmd_queue->complete_burst,
 				(void **)&dq_complete);
@@ -677,6 +609,87 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	int ret, dq_cnt;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
+{
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	int ret;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
@@ -702,7 +715,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	if (likely(!overflow))
 		return 0;
 
-	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
 		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
 	drain_num = 0;
@@ -712,7 +725,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		fsl_queue->block_id);
 	if (!blk_drain) {
 		drain_num++;
-		if (drain_num > 100) {
+		if (drain_num > 1000) {
 			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
@@ -721,8 +734,8 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		goto drain_again;
 	}
 	check_num++;
-	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+	if (check_num > 1000) {
+		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 25/30] dma/dpaa: disable SG descriptor as default
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (22 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
                   ` (5 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Use burst operation instead of SG copy by default until the SG issue is fixed.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 93199cf9a3..1c8334291a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,7 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable;
+static int s_sg_disable = 1;
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -335,7 +335,6 @@ fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
 	rte_free(queue->cq);
 }
 
-
 static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (23 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
                   ` (4 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Fix the ERRATA 050757/050265 workaround, which is not effective in
burst mode.

The SDF/DDF is referenced by the first entry of the compound frame
table, so move the DF into the compound frame table descriptor; the
same layout then works for both single copy and SG/burst copy.

Fix the SG issue caused by memset clearing the physical address of the
SGE in the compound frame table.
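
A sketch of the stride programming the workaround applies in both the
single-copy and the SG path. The sdf_ws struct and apply_err050757()
helper are illustrative mirrors of the fsl_qdma_sdf fields touched by
the diff below; the 128-byte threshold is FSL_QDMA_CMD_SS_ERR050757_LEN.

#include <stdint.h>
#include <stdbool.h>

struct sdf_ws {
	bool ssen;	/* source stride enable */
	uint16_t sss;	/* source stride size */
	uint16_t ssd;	/* source stride distance */
};

#define SS_ERR050757_LEN 128

/* Split source reads into 128-byte strides only when the transfer is
 * longer than the threshold; otherwise leave striding disabled. */
static void
apply_err050757(struct sdf_ws *sdf, uint32_t total_len)
{
	if (total_len > SS_ERR050757_LEN) {
		sdf->ssen = true;
		sdf->sss = SS_ERR050757_LEN;
		sdf->ssd = SS_ERR050757_LEN;
	} else {
		sdf->ssen = false;
		sdf->sss = 0;
		sdf->ssd = 0;
	}
}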

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 215 +++++++++++++++++------------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 +-
 2 files changed, 107 insertions(+), 115 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 1c8334291a..690ad5a6ff 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,10 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable = 1;
+static int s_sg_enable = 1;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+static int s_pci_read = 1;
+#endif
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -126,10 +129,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
 	struct fsl_qdma_cmpd_ft *ft;
-	struct fsl_qdma_df *df;
 
 	for (i = 0; i < queue->n_cq; i++) {
-		dma_addr_t phy_ft = 0, phy_df = 0;
+		dma_addr_t phy_ft = 0;
 
 		queue->ft[i] = dma_pool_alloc(NULL,
 			sizeof(struct fsl_qdma_cmpd_ft),
@@ -156,25 +158,14 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
 		queue->ft[i]->phy_dsge = phy_ft +
 			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
-
-		queue->df[i] = dma_pool_alloc(NULL,
-			sizeof(struct fsl_qdma_df),
-			RTE_CACHE_LINE_SIZE, &phy_df);
-		if (!queue->df[i]) {
-			rte_free(queue->ft[i]);
-			queue->ft[i] = NULL;
-			goto fail;
-		}
-
-		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
-		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
 
 		ft = queue->ft[i];
-		df = queue->df[i];
-		sdf = &df->sdf;
-		ddf = &df->ddf;
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
 		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
 
@@ -198,10 +189,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return 0;
 
 fail:
-	for (j = 0; j < i; j++) {
+	for (j = 0; j < i; j++)
 		rte_free(queue->ft[j]);
-		rte_free(queue->df[j]);
-	}
 
 	return -ENOMEM;
 }
@@ -247,23 +236,12 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	sprintf(nm, "Descriptor Buf_%d_%d",
-		block_id, queue_id);
-	cmd_queue->df = rte_zmalloc(nm,
-			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!cmd_queue->df) {
-		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->ft);
-		rte_free(cmd_queue->cq);
-		return -ENOMEM;
-	}
 	sprintf(nm, "Pending_desc_%d_%d",
 		block_id, queue_id);
 	cmd_queue->pending_desc = rte_zmalloc(nm,
 		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
 	if (!cmd_queue->pending_desc) {
 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->df);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
@@ -278,7 +256,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-desc_ring_%d_%d",
@@ -292,7 +269,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-pool-desc_ring_%d_%d",
@@ -307,7 +283,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 
@@ -320,7 +295,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 static void
 fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue->pending_desc);
@@ -664,8 +638,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ft = fsl_queue->ft[fsl_queue->ci];
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -745,7 +741,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
 	uint8_t *block = fsl_queue->block_vir, i;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
@@ -758,74 +754,10 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_sdf *sdf;
 #endif
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
-	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->prefetch = 1;
-#endif
-#endif
-
-	if (num == 1) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-		if (fsl_queue->pending_desc[start].len >
-			FSL_QDMA_CMD_SSS_DISTANCE) {
-			sdf->ssen = 1;
-			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-		} else {
-			sdf->sss = 0;
-			sdf->ssd = 0;
-		}
-#endif
-		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-			fsl_queue->pending_desc[start].dst,
-			fsl_queue->pending_desc[start].src,
-			fsl_queue->pending_desc[start].len);
-		if (!ret) {
-			fsl_queue->pending_start =
-				(start + 1) & (fsl_queue->pending_max - 1);
-			fsl_queue->pending_num = 0;
-		}
-		return ret;
-	} else if (s_sg_disable) {
-		while (fsl_queue->pending_num > 0) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-			if (fsl_queue->pending_desc[start].len >
-				FSL_QDMA_CMD_SSS_DISTANCE) {
-				sdf->ssen = 1;
-				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-			} else {
-				sdf->sss = 0;
-				sdf->ssd = 0;
-			}
-#endif
-			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-				fsl_queue->pending_desc[start].dst,
-				fsl_queue->pending_desc[start].src,
-				fsl_queue->pending_desc[start].len);
-			if (!ret) {
-				start = (start + 1) &
-					(fsl_queue->pending_max - 1);
-				fsl_queue->pending_start = start;
-				fsl_queue->pending_num--;
-			} else {
-				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
-					ret);
-				return -EIO;
-			}
-		}
 
-		return 0;
-	}
 	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
 	csgf_src->extion = 1;
 	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
@@ -849,13 +781,21 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->ssen = 1;
-		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-	} else {
-		sdf->sss = 0;
-		sdf->ssd = 0;
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
 	}
 #endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
@@ -875,6 +815,51 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint16_t start = fsl_queue->pending_start;
+	int ret;
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (!s_sg_enable) {
+		while (fsl_queue->pending_num > 0) {
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
+
+		return 0;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
+
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	__rte_unused uint32_t info_sz)
@@ -1276,6 +1261,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int regs_size;
 	int ret;
 	uint32_t i, j, k;
+	char *penv;
 
 	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
 		s_data_validation = 1;
@@ -1283,8 +1269,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
 		s_hw_err_check = 1;
 
-	if (getenv("DPAA_QDMA_SG_DISABLE"))
-		s_sg_disable = 1;
+	penv = getenv("DPAA_QDMA_SG_ENABLE");
+	if (penv)
+		s_sg_enable = atoi(penv);
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	penv = getenv("DPAA_QDMA_PCI_READ");
+	if (penv)
+		s_pci_read = atoi(penv);
+#endif
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index f4535af3dd..906d452d48 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -88,9 +88,7 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CFG_SSS_OFFSET		12
-#define FSL_QDMA_CMD_SSS_STRIDE		128
-#define FSL_QDMA_CMD_SSS_DISTANCE	128
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
@@ -192,8 +190,10 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t cache_align[2];
 	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
 	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
 	uint64_t phy_ssge;
 	uint64_t phy_dsge;
+	uint64_t phy_df;
 } __rte_packed;
 
 #define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
@@ -273,7 +273,6 @@ struct fsl_qdma_queue {
 	uint8_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
-	struct fsl_qdma_df **df;
 	void *engine;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 27/30] dma/dpaa: improve silent mode support
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (24 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
                   ` (3 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Don't save the enqueue context in silent mode; check the HW status only
to identify whether the queue is full.
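
A sketch of the silent-mode check, assuming a hypothetical read_bcqsr()
register-read hook and an illustrative full/XOFF mask (in the driver the
read is qdma_readl_be(block + FSL_QDMA_BCQSR(queue_id)) against
FSL_QDMA_BCQSR_QF_XOFF_BE): with no per-job context saved, queue
fullness is decided purely from the HW status register, spinning a
bounded number of times as the patch does.

#include <stdint.h>
#include <stdbool.h>

extern uint32_t read_bcqsr(void);	/* hypothetical register read */

#define BCQSR_QF_XOFF 0x1		/* illustrative "queue full" mask */

static bool
wait_for_room(void)
{
	int spins = 0;

	while (read_bcqsr() & BCQSR_QF_XOFF) {
		if (++spins >= 1000)
			return false;	/* still full, report no space */
	}
	return true;
}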

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 690ad5a6ff..b059d9b7ea 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -586,9 +586,13 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	int is_burst)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_slient)
+		return 0;
+
 	num = is_burst ? fsl_queue->pending_num : 1;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
@@ -697,7 +701,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (unlikely(s_hw_err_check)) {
+	if (fsl_qdma->is_slient || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -707,8 +711,14 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
 	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_slient) {
+		check_num++;
+		if (check_num < 1000)
+			goto overflow_check;
+		return -ENOSPC;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 28/30] dma/dpaa: support multiple SG copies
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (25 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
                   ` (2 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Split a burst copy into multiple SG copies if the burst count exceeds
the maximum number of SG entries.
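
A small illustration of the splitting rule, under the assumption of an
illustrative SG table size (the real limit is FSL_QDMA_SG_MAX_ENTRY and
the splitting itself is the eq_sg loop in the diff below): a pending
burst larger than the SG table goes out as several SG copies of at most
SG_MAX_ENTRY jobs each.

#include <stdint.h>

#define SG_MAX_ENTRY 64	/* stands in for FSL_QDMA_SG_MAX_ENTRY */

/* Number of SG copies a pending burst is split into. */
static uint16_t
num_sg_copies(uint16_t pending)
{
	return (pending + SG_MAX_ENTRY - 1) / SG_MAX_ENTRY;
}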

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 180 +++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |   2 +-
 2 files changed, 98 insertions(+), 84 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index b059d9b7ea..d0df97447a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -584,17 +584,15 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 
 static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
+	uint16_t num)
 {
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
 	if (fsl_qdma->is_slient)
 		return 0;
 
-	num = is_burst ? fsl_queue->pending_num : 1;
-
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
 	ret = rte_ring_enqueue(fsl_queue->complete_burst,
@@ -634,6 +632,69 @@ fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	if (fsl_qdma->is_slient || unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow)) {
+		return 0;
+	} else if (fsl_qdma->is_slient) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
+	}
+	goto overflow_check;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	dma_addr_t dst, dma_addr_t src, size_t len)
@@ -646,6 +707,10 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
@@ -677,7 +742,7 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	/* This entry is the last entry. */
 	csgf_dest->final = 1;
 
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
 	if (ret)
 		return ret;
 	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
@@ -689,81 +754,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
-{
-	int overflow = 0;
-	uint32_t reg;
-	uint16_t blk_drain, check_num, drain_num;
-	uint8_t *block = fsl_queue->block_vir;
-	const struct rte_dma_stats *st = &fsl_queue->stats;
-	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-
-	check_num = 0;
-overflow_check:
-	if (fsl_qdma->is_slient || unlikely(s_hw_err_check)) {
-		reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
-			1 : 0;
-	} else {
-		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
-			QDMA_QUEUE_CR_WM) ? 1 : 0;
-	}
-
-	if (likely(!overflow)) {
-		return 0;
-	} else if (fsl_qdma->is_slient) {
-		check_num++;
-		if (check_num < 1000)
-			goto overflow_check;
-		return -ENOSPC;
-	}
-
-	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
-		fsl_queue->block_id, fsl_queue->queue_id,
-		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
-	drain_num = 0;
-
-drain_again:
-	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
-		fsl_queue->block_id);
-	if (!blk_drain) {
-		drain_num++;
-		if (drain_num > 1000) {
-			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
-				fsl_queue->block_id, fsl_queue->queue_id,
-				st->submitted - st->completed);
-			return -ENOSPC;
-		}
-		goto drain_again;
-	}
-	check_num++;
-	if (check_num > 1000) {
-		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			st->submitted - st->completed);
-		return -ENOSPC;
-	}
-	goto overflow_check;
-
-	return -ENOSPC;
-}
-
 static int
 fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-	uint8_t *block = fsl_queue->block_vir, i;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-	uint32_t total_len = 0;
-	uint8_t num = fsl_queue->pending_num;
-	uint16_t start = fsl_queue->pending_start, idx;
+	uint32_t total_len;
+	uint16_t start, idx, num, i;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
@@ -808,7 +822,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		}
 	}
 #endif
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
 
@@ -820,7 +834,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 
 	fsl_queue->pending_start =
 		(start + num) & (fsl_queue->pending_max - 1);
-	fsl_queue->pending_num = 0;
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
@@ -831,10 +847,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	if (fsl_queue->pending_num == 1) {
 		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
 			fsl_queue->pending_desc[start].dst,
@@ -871,17 +883,19 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	__rte_unused uint32_t info_sz)
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
 	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
 	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
 	return 0;
 }
@@ -985,9 +999,9 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint16_t idx;
 	int ret;
 
-	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
-		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
-			vchan);
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
 		return -ENOSPC;
 	}
 	idx = (start + pending) & (fsl_queue->pending_max - 1);
@@ -1253,7 +1267,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 906d452d48..35791fec8e 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -270,7 +270,7 @@ struct fsl_qdma_queue {
 	struct fsl_qdma_desc *pending_desc;
 	uint16_t pending_max;
 	uint16_t pending_start;
-	uint8_t pending_num;
+	uint16_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 29/30] dma/dpaa: support max SG entry size
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (26 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-19 10:01 ` [PATCH 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

An SG transaction does not help performance for large transfer sizes.
In SG copy, start a single transaction for any job whose size exceeds
the max SG entry size.

The default max SG entry size is 2000 bytes, based on mem-to-mem
measurements; users can change it according to their own experiments:
export DPAA_QDMA_SG_MAX_ENTRY_SIZE=xxx
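
A sketch of the per-job path selection and of how the threshold is read
from the environment, as the patch does. The use_single_copy() and
read_threshold() helpers are hypothetical; the driver makes the same
decision inline in fsl_qdma_enqueue_desc_sg().

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

static uint32_t s_sg_max_entry_sz = 2000;	/* default from the patch */

/* Jobs longer than the threshold go out as single transactions;
 * shorter ones are batched into SG descriptors. */
static bool
use_single_copy(uint32_t len)
{
	return len > s_sg_max_entry_sz;
}

static void
read_threshold(void)
{
	const char *penv = getenv("DPAA_QDMA_SG_MAX_ENTRY_SIZE");

	if (penv)
		s_sg_max_entry_sz = (uint32_t)atoi(penv);
}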

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 41 ++++++++++++++++++++++++++++++++----
 1 file changed, 37 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index d0df97447a..bc925fc521 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -12,6 +12,8 @@
 static int s_data_validation;
 static int s_hw_err_check;
 static int s_sg_enable = 1;
+static uint32_t s_sg_max_entry_sz = 2000;
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 static int s_pci_read = 1;
 #endif
@@ -761,7 +763,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	uint32_t total_len;
-	uint16_t start, idx, num, i;
+	uint16_t start, idx, num, i, next_idx;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
@@ -770,13 +772,31 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 eq_sg:
 	total_len = 0;
 	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
+
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
 		num = FSL_QDMA_SG_MAX_ENTRY;
 	else
 		num = fsl_queue->pending_num;
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
@@ -799,7 +819,16 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
 		ft->desc_dsge[i].final = 0;
 		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
 	}
+
 	ft->desc_ssge[num - 1].final = 1;
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
@@ -1297,6 +1326,10 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (penv)
 		s_sg_enable = atoi(penv);
 
+	penv = getenv("DPAA_QDMA_SG_MAX_ENTRY_SIZE");
+	if (penv)
+		s_sg_max_entry_sz = atoi(penv);
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	penv = getenv("DPAA_QDMA_PCI_READ");
 	if (penv)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [PATCH 30/30] bus/dpaa: add port bmi stats
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (27 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
@ 2024-07-19 10:01 ` Gagandeep Singh
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-19 10:01 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended statistics.
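
A sketch of the extended-stats fix in fman_if_stats_get_all(): each
64-bit counter is exposed as two 32-bit words, and only the upper word
must be shifted before combining. The combine_counter() helper is
illustrative; the real code reads the words with in_be32().

#include <stdint.h>

static uint64_t
combine_counter(uint32_t lo, uint32_t hi)
{
	/* low word stays in place, high word fills bits 63:32 */
	return (uint64_t)lo | ((uint64_t)hi << 32);
}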

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index 3a6dd555a7..60681068ea 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 060b8c678f..4d9a4c7e6d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discard_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_deallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b6c61b8b6b..261a5a3ca7 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 00/30] NXP DMA driver fixes and Enhancements
  2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                   ` (28 preceding siblings ...)
  2024-07-19 10:01 ` [PATCH 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
@ 2024-07-22 11:58 ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                     ` (30 more replies)
  29 siblings, 31 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev

V2 changes:
* fix compilation issue on Ubuntu 22.04

Gagandeep Singh (6):
  dma/dpaa: support multi channels
  dma/dpaa: fix job enqueue
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: remove unwanted desc
  dma/dpaa: data path optimization

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (22):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: support multiple HW queues
  dma/dpaa2: adapt DMA driver API
  dma/dpaa2: multiple process support
  dma/dpaa2: add sanity check for SG entry
  dma/dpaa2: include DPAA2 specific header files
  dma/dpaa2: borrow flags of DMA operation to pass job context
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: dequeue status queue
  dma/dpaa: add Scatter Gather support
  dma/dpaa: block dequeue
  dma/dpaa: improve congestion handling
  dma/dpaa: disable SG descriptor as default
  dma/dpaa: improve ERRATA workaround solution
  dma/dpaa: improve silent mode support
  dma/dpaa: support multiple SG copies
  dma/dpaa: support max SG entry size

Vanshika Shukla (1):
  dma/dpaa: add burst capacity API

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    3 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1641 +++++++-----
 drivers/dma/dpaa/dpaa_qdma.h                  |  289 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2381 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   81 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 28 files changed, 2856 insertions(+), 2303 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 01/30] dma/dpaa2: configure route by port by PCIe port param
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
                     ` (29 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
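
For illustration only (not part of this patch): a minimal sketch of how an
application could supply these PCIe routing parameters through the generic
dmadev vchan configuration after this change. The dev_id, vchan, and the
coreid/pfid/vfid values are placeholders, and the helper name is made up.

/* Sketch only: configure a MEM_TO_DEV virtual channel whose destination
 * is routed by PCIe port. The device is assumed to be already configured
 * via rte_dma_configure().
 */
#include <rte_dmadev.h>

static int setup_pcie_dst_vchan(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 1,	/* -> rbp.dportid */
				.pfid = 0,	/* -> rbp.dpfid */
				.vfen = 1,	/* -> rbp.dvfa */
				.vfid = 2,	/* -> rbp.dvfid */
			},
		},
	};

	return rte_dma_vchan_setup(dev_id, vchan, &conf);
}

The driver then maps dst_port.pcie (and src_port.pcie for DEV_TO_MEM) into the
route-by-port fields, as done in dpaa2_qdma_vchan_rbp_set() below.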

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 2c91ceec13..5954b552b5 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.dvfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index 713ed41f0c..eb012cfbfc 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 02/30] dma/dpaa2: support multiple HW queues
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-07-22 11:58   ` [v2 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
                     ` (28 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Initialize and configure the DMA device queues according to the
HW queues supported by the MC bus.
Because multiple queues per device are supported, the virtual queue
implementation is dropped.
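
Illustrative sketch only (not part of this patch), assuming a probed dpaa2
qdma dmadev identified by dev_id: with multiple HW queues exposed,
max_vchans reported by rte_dma_info_get() reflects the number of HW queues,
and one vchan can be set up per queue. The helper name is made up.

/* Sketch only: one virtual channel per HW queue reported by the driver. */
#include <rte_dmadev.h>

static int qdma_configure_all_queues(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf dev_conf = { 0 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 256,
	};
	uint16_t i;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret)
		return ret;

	/* max_vchans now equals the number of DPDMAI HW queues */
	dev_conf.nb_vchans = info.max_vchans;
	ret = rte_dma_configure(dev_id, &dev_conf);
	if (ret)
		return ret;

	for (i = 0; i < dev_conf.nb_vchans; i++) {
		ret = rte_dma_vchan_setup(dev_id, i, &vchan_conf);
		if (ret)
			return ret;
	}

	return rte_dma_start(dev_id);
}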

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 312 +++++++++++++++------------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 140 insertions(+), 178 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5954b552b5..945ba71e4a 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -478,9 +478,9 @@ dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
 
 static inline uint16_t
 dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -512,9 +512,9 @@ dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
 
 static inline uint16_t
 dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -548,12 +548,12 @@ dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
 /* Function to receive a QDMA job for a given device and queue*/
 static int
 dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -562,7 +562,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, pull_size;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -575,15 +575,17 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_jobs;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -697,12 +699,12 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 
 static int
 dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -710,7 +712,7 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, next_pull, num_pulled = 0;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -725,15 +727,15 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	rxq = &(dpdmai_dev->rx_queue[0]);
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
 	do {
 		dq_storage = rxq->q_storage->dq_storage[0];
@@ -810,7 +812,7 @@ dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
 			uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
 	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
 	struct qbman_eq_desc eqdesc;
 	struct qbman_swp *swp;
@@ -931,8 +933,8 @@ dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -966,8 +968,8 @@ dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
 
 int
 rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_cpls)
 {
 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
 	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
@@ -978,14 +980,11 @@ rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
 }
 
 static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
+dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_jobs)
 {
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
+	int ret;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
 		/** Make sure there are enough space to get jobs.*/
@@ -1002,42 +1001,12 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
 				qdma_vq->num_dequeues), nb_jobs);
 
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
-
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
-
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
-		}
+	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
+	if (ret < 0) {
+		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
+			qdma_vq->dpdmai_dev->dpdmai_id,
+			qdma_vq->vq_id, ret);
+		return ret;
 	}
 
 	qdma_vq->num_dequeues += ret;
@@ -1046,9 +1015,9 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 
 static uint16_t
 dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
+	const uint16_t nb_cpls,
+	uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1056,7 +1025,7 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
 	int ret, i;
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 
 	for (i = 0; i < ret; i++)
 		st[i] = jobs[i]->status;
@@ -1071,8 +1040,8 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1082,7 +1051,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 
 	RTE_SET_USED(has_error);
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
 				jobs, nb_cpls);
 
 	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
@@ -1103,16 +1072,15 @@ rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 			     RTE_DMA_CAPA_MEM_TO_DEV |
@@ -1120,7 +1088,7 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
 
@@ -1129,12 +1097,13 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
 	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1142,9 +1111,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
+		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
+			dev->data->dev_name);
+		return -EBUSY;
 	}
 
 	/* Allocate Virtual Queues */
@@ -1156,6 +1125,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
 		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++)
+		qdma_dev->vqs[i].vq_id = i;
+
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 
 	return 0;
@@ -1257,13 +1229,12 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
 	int sg_enable = 0, ret;
@@ -1301,20 +1272,6 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
 	}
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
-
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
@@ -1410,8 +1367,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
+		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
+			dev->data->dev_name);
 		return -EBUSY;
 	}
 
@@ -1424,10 +1381,6 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	/* Reset and free virtual queues */
-	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
-	}
 	rte_free(qdma_dev->vqs);
 	qdma_dev->vqs = NULL;
 
@@ -1504,29 +1457,35 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	int ret;
+	struct dpaa2_queue *rxq;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
 	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+		}
 	}
 
 	/* Close the device at underlying layer*/
 	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -1538,80 +1497,87 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	struct dpdmai_rx_queue_attr rx_attr;
 	struct dpdmai_tx_queue_attr tx_attr;
 	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
 	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
+	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
 	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		return ret;
 	}
 
 	/* Get DPDMAI attributes */
 	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		goto init_err;
 	}
 	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
+	/* Set up Rx Queues */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq = &dpdmai_dev->rx_queue[i];
+		rxq->q_storage = rte_malloc("dq_storage",
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto init_err;
+		}
 
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 	}
 
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
+		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
 
 	/* Enable the device */
 	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..786dcb9308 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -18,7 +18,7 @@
 
 #define DPAA2_QDMA_MAX_SG_NB 64
 
-#define DPAA2_DPDMAI_MAX_QUEUES	1
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
 /** FLE single job pool size: job pointer(uint64_t) +
  * 3 Frame list + 2 source/destination descriptor.
@@ -245,8 +245,6 @@ typedef int (qdma_enqueue_multijob_t)(
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
@@ -255,8 +253,6 @@ struct qdma_virt_queue {
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 03/30] dma/dpaa2: adapt DMA driver API
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-07-22 11:58   ` [v2 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-07-22 11:58   ` [v2 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 04/30] dma/dpaa2: multiple process support Gagandeep Singh
                     ` (27 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

1) Support DMA single copy and SG copy.
2) Silent mode support.

Add an index combined with the length field.
For silent mode, this index is used to notify the DMA driver
which inner descriptor should be used.
For non-silent mode, this index is used to notify the user
which descriptor has completed.
In addition, because dpaa2 qdma is not able to preserve ordering,
"rte_dma_completed_t" returns multiple indexes instead of only the last index.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 1667 +++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  126 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  119 +-
 drivers/dma/dpaa2/version.map          |   13 -
 4 files changed, 799 insertions(+), 1126 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 945ba71e4a..15d3776603 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,218 +16,345 @@
 
 #define DPAA2_QDMA_PREFETCH "prefetch"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
 static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	fd->simple_pci.len_sl = len;
+	if ((ring->tail + nb) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem, nb * sizeof(uint16_t));
+		ring->tail += nb;
+	} else {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem,
+			(DPAA2_QDMA_MAX_DESC - ring->tail) *
+			sizeof(uint16_t));
+		rte_memcpy(&ring->cntx_idx_ring[0],
+			&elem[DPAA2_QDMA_MAX_DESC - ring->tail],
+			(nb - DPAA2_QDMA_MAX_DESC + ring->tail) *
+			sizeof(uint16_t));
+		ring->tail = (ring->tail + nb) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+	if (free_space)
+		*free_space = ring->free_space;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	return nb;
+}
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
+{
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!ret)
+		return 0;
 
-	return 0;
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
 }
 
-static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
 
-	return 0;
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
+
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
 }
 
 static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
-
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 {
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
+{
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i = 0, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = len;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = len;
+
+	total_len += len;
+	sg_cntx->cntx_idx[i] = idx;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
 {
-	uint16_t i;
-	uint32_t total_len = 0;
-	uint64_t iova;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
+}
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +362,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +372,10 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,327 +386,432 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
 	}
 
-	return 0;
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
+	}
+
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint32_t cntx_idx, len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+
+	if (unlikely(nb_src != nb_dst))
+		return -ENOTSUP;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
+		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint16_t cntx_idx;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[cntx_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = cntx_idx;
+	}
 
-	*job = *ppjob;
-	(*job)->status = status;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	return (*job)->vq_id;
-}
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+		}
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
+		fle_post_populate(fle, src, dst, len);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, len,
+			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	return job[0]->vq_id;
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
+
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
 	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
-		dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -594,21 +820,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy\n");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -617,7 +842,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -631,7 +856,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -664,27 +889,40 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
+		} else {
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
+		}
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
 		dq_storage++;
-		num_rx += num_rx_ret;
 	} while (pending);
 
 	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
 			;
 		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 	}
 	/* issue a volatile dequeue command for next pull */
 	while (1) {
 		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)\n");
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
 			continue;
 		}
 		break;
@@ -694,387 +932,18 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq;
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
-		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
+	if (has_error)
+		*has_error = false;
 
 	return num_rx;
 }
 
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_jobs)
-{
-	int ret;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-	if (ret < 0) {
-		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
-			qdma_vq->dpdmai_dev->dpdmai_id,
-			qdma_vq->vq_id, ret);
-		return ret;
-	}
-
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-	const uint16_t nb_cpls,
-	uint16_t *last_idx,
-	enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-	uint16_t vchan, const uint16_t nb_cpls,
-	uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-}
-
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct rte_dma_info *dev_info,
@@ -1119,80 +988,22 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	/* Allocate Virtual Queues */
 	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
 	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
 	for (i = 0; i < dev_conf->nb_vchans; i++)
 		qdma_dev->vqs[i].vq_id = i;
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
 }
 
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
-
-	if (!devargs)
-		return 0;
-
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
-
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-	rte_kvargs_free(kvlist);
-
-	return 1;
-}
-
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
-
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
-}
-
 static int
 dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 	const struct rte_dma_vchan_conf *conf)
@@ -1236,8 +1047,8 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
+	char *env = NULL;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1247,85 +1058,70 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
+	/**Default enable FLE PRE POPULATE*/
+	env = getenv("DPAA2_QDMA_FLE_PRE_POPULATE");
+	if (env)
+		qdma_dev->vqs[vchan].fle_pre_populate = atoi(env);
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
 
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	env = getenv("DPAA2_QDMA_DESC_DEBUG");
+	if (env && atoi(env))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1374,9 +1170,12 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
@@ -1618,7 +1417,7 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1628,8 +1427,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1639,10 +1438,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 786dcb9308..ee34532408 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,7 +5,7 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +13,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
-
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,6 +163,39 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -221,27 +215,18 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
@@ -249,10 +234,11 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -262,18 +248,17 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/**Used for silent enabled*/
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -284,6 +269,7 @@ struct qdma_device {
 	uint16_t num_vqs;
 	/** Device state - started or stopped */
 	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..729bff42bb 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,118 +7,19 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
 
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index eb012cfbfc..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_24 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 04/30] dma/dpaa2: multiple process support
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (2 preceding siblings ...)
  2024-07-22 11:58   ` [v2 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
                     ` (26 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Support multiple processes for dpaa2 DMA.
1) Move the queue configuration procedure from the init function to
the device configuration function, which is called by the user.

2) Instances of dpaa2_dpdmai_dev and qdma_device are allocated by the
primary process and shared among all processes.

3) The MC register region is mapped per process.

4) The user is responsible for checking the number of configured
virtual queues before using a DMA device, to identify whether the
device is already occupied by another process (see the sketch below).
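
A minimal caller-side sketch of that check, assuming dev_id was obtained
in the usual way; it uses only the standard rte_dma_info_get() dmadev
call, and the driver fills nb_vchans as added in this patch:

#include <rte_dmadev.h>

	struct rte_dma_info info;

	/* nb_vchans reflects vchans configured by any process on this device. */
	if (rte_dma_info_get(dev_id, &info) == 0 && info.nb_vchans != 0) {
		/* Device already configured/occupied by another process. */
	}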

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 405 ++++++++++++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 254 insertions(+), 157 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 15d3776603..44b82c139e 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #include <rte_eal.h>
@@ -19,6 +19,8 @@
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
 
+static struct fsl_mc_io s_proc_mc_reg;
+
 static inline int
 qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
 	const uint16_t *elem, uint16_t nb,
@@ -960,6 +962,9 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
@@ -969,25 +974,102 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	const struct rte_dma_conf *dev_conf,
 	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
-			dev->data->dev_name);
-		return -EBUSY;
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
+
+		return -ENOTSUP;
+	}
+
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
+
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
+
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
 
 	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
+	qdma_dev->vqs = rte_zmalloc(NULL,
 		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
 		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
@@ -995,13 +1077,50 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
-	for (i = 0; i < dev_conf->nb_vchans; i++)
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
+
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
+
+	return ret;
 }
 
 static int
@@ -1130,11 +1249,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1143,30 +1268,33 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
-			dev->data->dev_name);
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
@@ -1180,8 +1308,31 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
+	/* Free RXQ storages */
+	for (i = 0; i < qdma_dev->num_vqs; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+	}
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1190,18 +1341,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1256,56 +1397,97 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd process.",
 			dpdmai_dev->dpdmai_id);
-	}
-
-	/* Set up the DQRR storage for Rx */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		return 0;
 	}
 
 	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
-			dpdmai_dev->dpdmai_id);
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
+
+		return ret;
+	}
+
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
 	}
 
 	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
+	if (!dpaa2_coherent_no_alloc_cache) {
+		if (dpaa2_svr_family == SVR_LX2160A) {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+		} else {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_COHERENT_ALLOCATE_CACHE;
+		}
+	}
+
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd process.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
 		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
@@ -1314,105 +1496,24 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	}
 
 	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->token, &attr);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
 			__func__, dpdmai_dev->dpdmai_id, ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queues */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				CMD_PRI_LOW,
-				dpdmai_dev->token,
-				i, 0, &rx_queue_cfg);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-
-		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq = &dpdmai_dev->rx_queue[i];
-		rxq->q_storage = rte_malloc("dq_storage",
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto init_err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-	}
-
-	/* Get Rx and Tx queues FQID's */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &rx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
-
-		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &tx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
-	}
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
-	if (!dpaa2_coherent_no_alloc_cache) {
-		if (dpaa2_svr_family == SVR_LX2160A) {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
-		} else {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_COHERENT_ALLOCATE_CACHE;
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
 		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
-
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
-	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
-	}
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index ee34532408..743a43fa14 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -200,8 +200,6 @@ struct qdma_cntx_long {
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -267,8 +265,6 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
 	uint8_t is_silent;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 05/30] dma/dpaa2: add sanity check for SG entry
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (3 preceding siblings ...)
  2024-07-22 11:58   ` [v2 04/30] dma/dpaa2: multiple process support Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
                     ` (25 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Make sure the number of SG entries does not exceed the supported
maximum; a caller-side sketch of the corresponding limit check follows.
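
A minimal caller-side sketch, assuming dev_id, vchan and the src/dst SG
arrays are already prepared; RTE_DPAA2_QDMA_JOB_SUBMIT_MAX comes from
rte_pmd_dpaa2_qdma.h:

#include <rte_dmadev.h>
#include <rte_pmd_dpaa2_qdma.h>

	if (nb_sge > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
		/* Too many entries for one job; split into smaller SG jobs. */
	} else {
		ret = rte_dma_copy_sg(dev_id, vchan, src, dst,
				nb_sge, nb_sge, RTE_DMA_OP_FLAG_SUBMIT);
	}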

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 44b82c139e..7f6ebcb46b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -615,8 +615,17 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
-	if (unlikely(nb_src != nb_dst))
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
 		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 06/30] dma/dpaa2: include DPAA2 specific header files
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (4 preceding siblings ...)
  2024-07-22 11:58   ` [v2 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
                     ` (24 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include the dpaa2_hw_pvt.h and dpaa2_hw_dpio.h header files.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 743a43fa14..eb02bff08f 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,6 +5,9 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
 #define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (5 preceding siblings ...)
  2024-07-22 11:58   ` [v2 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                     ` (23 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For copy_sg: pass a list of job indexes through the flags.
For copy: pass the job index through the flags.
A caller-side usage sketch follows.
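
A sketch of how the index is carried in the flags, assuming the device,
channel and buffers are already set up and idx_list is a uint16_t array
aligned to RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN; the RTE_DPAA2_QDMA_*_SUBMIT
macros come from rte_pmd_dpaa2_qdma.h in this series:

#include <rte_dmadev.h>
#include <rte_pmd_dpaa2_qdma.h>

	uint16_t job_idx = 10;
	uint64_t flags;

	/* Single copy: the job index rides above the standard flag bits. */
	flags = RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);
	ret = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len, flags);

	/* SG copy: pass the index list address instead; the alignment keeps
	 * its low bits free for the standard flags.
	 */
	flags = RTE_DPAA2_QDMA_SG_SUBMIT(idx_list, RTE_DMA_OP_FLAG_SUBMIT);
	ret = rte_dma_copy_sg(dev_id, vchan, src_sge, dst_sge, nb, nb, flags);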

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 92 ++++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  7 ++
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h | 15 ++++-
 3 files changed, 68 insertions(+), 46 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 7f6ebcb46b..7de4894b35 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -280,25 +280,22 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i = 0, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < (nb_sge - 1); i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
 
 		src_sge->ctrl.f = 0;
 		dst_sge->ctrl.f = 0;
@@ -309,19 +306,15 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	if (unlikely(src[i].length != dst[i].length))
 		return -ENOTSUP;
 
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
-
 	src_sge->addr_lo = (uint32_t)src[i].addr;
 	src_sge->addr_hi = (src[i].addr >> 32);
-	src_sge->data_len.data_len_sl0 = len;
+	src_sge->data_len.data_len_sl0 = src[i].length;
 
 	dst_sge->addr_lo = (uint32_t)dst[i].addr;
 	dst_sge->addr_hi = (dst[i].addr >> 32);
-	dst_sge->data_len.data_len_sl0 = len;
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
 
-	total_len += len;
-	sg_cntx->cntx_idx[i] = idx;
+	total_len += dst[i].length;
 	sg_cntx->job_nb = nb_sge;
 
 	src_sge->ctrl.f = QDMA_SG_F;
@@ -343,20 +336,18 @@ sg_entry_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < nb_sge; i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -366,7 +357,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -374,8 +365,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
@@ -606,14 +596,15 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	int ret = 0, expected;
-	uint32_t cntx_idx, len;
+	int ret = 0, expected, i;
+	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_sg *cntx_sg = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
 
 	if (unlikely(nb_src != nb_dst)) {
 		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
@@ -630,14 +621,16 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	memset(fd, 0, sizeof(struct qbman_fd));
 
 	if (qdma_dev->is_silent) {
-		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
-		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_sg);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -656,8 +649,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
 	if (qdma_vq->fle_pre_populate) {
-		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
 			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
 		len = sg_entry_post_populate(src, dst,
 			cntx_sg, nb_src);
@@ -683,6 +681,8 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -705,28 +705,23 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
-	uint16_t cntx_idx;
-	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long;
+	struct qdma_cntx_long *cntx_long = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
-
 	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[cntx_idx];
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_long);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = cntx_idx;
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -749,16 +744,20 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 			fle_sdd_pre_populate(&cntx_long->fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
 		}
 
-		fle_post_populate(fle, src, dst, len);
+		fle_post_populate(fle, src, dst, length);
 	} else {
 		sdd = cntx_long->fle_sdd.sdd;
 		sdd_iova = cntx_iova +
 			offsetof(struct qdma_cntx_long, fle_sdd) +
 			offsetof(struct qdma_cntx_fle_sdd, sdd);
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
-			src, dst, len,
+			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
@@ -766,6 +765,8 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -963,14 +964,17 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index eb02bff08f..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -199,6 +199,12 @@ struct qdma_cntx_long {
 	uint16_t rsv[3];
 } __rte_packed;
 
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -256,6 +262,7 @@ struct qdma_virt_queue {
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
 	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
 	int num_valid_jobs;
 
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 729bff42bb..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -20,6 +20,17 @@
 #define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
 	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 08/30] bus/fslmc: enhance the qbman dq storage logic
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (6 preceding siblings ...)
  2024-07-22 11:58   ` [v2 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 09/30] dma/dpaa2: add short FD support Gagandeep Singh
                     ` (22 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used among multiple cores, but the single DQ
storage in the first union member is leaked when multiple storages are
allocated. Keeping the single DQ storage in the union makes no sense,
so remove it and reuse the first of the multiple DQ storages for this
case; a short sketch of the resulting layout follows.
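
A sketch of the ingress-side lookup after this change, using the field
names from this series; the exact slot used varies by driver (the qdma
PMD pulls from slot 0, the net PMD indexes by lcore):

	struct queue_storage_info_t *stg;
	struct qbman_result *dq;

	/* One DQ storage slot per lcore replaces the single shared pointer. */
	stg = dpaa2_q->q_storage[rte_lcore_id()];
	dq = stg->dq_storage[0];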

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 43 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 103 insertions(+), 141 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 07256ed7ec..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed\n");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..1ce481c88d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d55..7df208d004 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1891,7 +1891,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -1982,10 +1982,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2036,18 +2033,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4754b9d6f8..c51e68f748 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 7de4894b35..53caccecd7 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -824,7 +824,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1032,13 +1032,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1094,24 +1088,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1122,11 +1101,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1324,11 +1299,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..ac524d2964 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 23f7c4132d..a0c057d183 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index aeee4ac289..5f4d0c68a4 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 09/30] dma/dpaa2: add short FD support
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (7 preceding siblings ...)
  2024-07-22 11:58   ` [v2 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                     ` (21 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A short FD can be used for the single-transfer scenario and shows
higher performance than the FLE format.
1) Save the index context in the FD att field for short and FLE
(non-SG) FDs.
2) Identify the FD type from the att field of the FD.
3) Force 48-bit addresses for the source address and the FLE,
according to the spec.
An illustrative sketch of the att-field packing follows the list.
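
The sketch below only illustrates the idea of stashing a job index plus
an FD-type tag in spare attribute bits; the helper names and bit layout
are simplified stand-ins, not the driver's actual definitions (the
series uses dpaa2_qdma_fd_save_att() internally):

#include <stdint.h>

#define FD_ATT_TYPE_SHIFT 14
#define FD_ATT_IDX_MASK   ((1u << FD_ATT_TYPE_SHIFT) - 1)

/* Pack a job index and an FD-type tag into spare attribute bits. */
static inline uint16_t fd_att_pack(uint16_t idx, uint8_t type)
{
	return (uint16_t)((type << FD_ATT_TYPE_SHIFT) |
			(idx & FD_ATT_IDX_MASK));
}

/* The completion path recovers the type and index from the same bits. */
static inline uint8_t fd_att_type(uint16_t att)
{
	return (uint8_t)(att >> FD_ATT_TYPE_SHIFT);
}

static inline uint16_t fd_att_idx(uint16_t att)
{
	return (uint16_t)(att & FD_ATT_IDX_MASK);
}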

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 3 files changed, 285 insertions(+), 111 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 53caccecd7..d1358b686c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -522,7 +522,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -545,11 +544,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -560,11 +556,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -582,6 +573,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -644,7 +637,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -680,6 +673,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -696,74 +690,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
-
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -771,15 +869,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -799,10 +971,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -901,25 +1069,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -944,8 +1095,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1178,11 +1331,18 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	/** Default Enable Short FD for nonSG format.
+	 * Short FD has higher perf than FLE.
+	 */
+	env = getenv("DPAA2_QDMA_USING_SHORT_FD");
+	if (env)
+		qdma_dev->vqs[vchan].using_short_fd = atoi(env);
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1202,7 +1362,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 10/30] dma/dpaa2: limit the max descriptor number
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (8 preceding siblings ...)
  2024-07-22 11:58   ` [v2 09/30] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                     ` (20 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD att field, which is
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits wide.

The maximum descriptor number of the ring is a power of 2, so the
effective maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2), i.e. 8192 / 2 = 4096.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 11/30] dma/dpaa2: change the DMA copy return value
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (9 preceding siblings ...)
  2024-07-22 11:58   ` [v2 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                     ` (19 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

The return value of DMA copy/SG copy should be the index of the
descriptor copied on success.
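
A minimal application-side sketch (generic dmadev API; dev_id, vchan,
src_iova, dst_iova and len are placeholders, not part of this patch)
showing how the returned index relates to completion reporting:

	int idx;
	uint16_t nb, last_idx;
	bool error;

	/* On success the return value is the ring index of the descriptor. */
	idx = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
			RTE_DMA_OP_FLAG_SUBMIT);
	if (idx < 0)
		return idx;

	nb = rte_dma_completed(dev_id, vchan, 1, &last_idx, &error);
	/* When the copy has finished: nb == 1 and last_idx == (uint16_t)idx. */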

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index d1358b686c..b70750fede 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -605,6 +605,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -681,10 +686,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 12/30] dma/dpaa2: move the qdma header to common place
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (10 preceding siblings ...)
  2024-07-22 11:58   ` [v2 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 13/30] dma/dpaa: support multi channels Gagandeep Singh
                     ` (18 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change the code accordingly.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9283154f8..ab42440733 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index b70750fede..19d8af9416 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -212,16 +212,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -235,23 +235,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -350,21 +348,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -444,17 +440,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -560,7 +555,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -610,9 +605,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -631,11 +626,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -667,8 +658,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1011,7 +1001,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1046,7 +1036,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1078,7 +1068,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1131,11 +1121,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1317,6 +1307,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char pool_name[64];
 	int ret;
 	char *env = NULL;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1359,6 +1350,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 13/30] dma/dpaa: support multi channels
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (11 preceding siblings ...)
  2024-07-22 11:58   ` [v2 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
                     ` (17 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena

This patch adds support for using multiple DMA channels in the driver.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 10e65ef1d7..24ad7ad019 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -648,8 +648,8 @@ fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
 	}
 
 finally:
-	return fsl_qdma->desc_allocated++;
-
+	fsl_qdma->desc_allocated++;
+	return 0;
 exit:
 	return -ENOMEM;
 }
@@ -670,7 +670,7 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
+	dev_info->max_vchans = 4;
 	dev_info->max_desc = DPAADMA_MAX_DESC;
 	dev_info->min_desc = DPAADMA_MIN_DESC;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 14/30] dma/dpaa: fix job enqueue
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (12 preceding siblings ...)
  2024-07-22 11:58   ` [v2 13/30] dma/dpaa: support multi channels Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
                     ` (16 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: stable

The check shall use a bitwise AND instead of an equality comparison,
since flags is a bit-mask.
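
A short illustration (flag values are from the generic dmadev API, not
part of this patch) of why the bitwise test is needed:

	uint64_t flags = RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT;

	/* flags == RTE_DMA_OP_FLAG_SUBMIT is false here, so the old check
	 * would skip ringing the doorbell; flags & RTE_DMA_OP_FLAG_SUBMIT
	 * is true and correctly triggers the submission.
	 */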

Fixes: 7da29a644c51 ("dma/dpaa: support DMA operations")
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 24ad7ad019..0a91cf040a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -615,7 +615,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 
 	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
 		reg |= FSL_QDMA_BCQMR_EI_BE;
 		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 15/30] dma/dpaa: add burst capacity API
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (13 preceding siblings ...)
  2024-07-22 11:58   ` [v2 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                     ` (15 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Vanshika Shukla

From: Vanshika Shukla <vanshika.shukla@nxp.com>

This patch improves the DPAA QDMA driver and
adds the dpaa_qdma_burst_capacity API, which returns the
remaining space in the descriptor ring.
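
A minimal application-side sketch (generic dmadev API; dev_id, vchan,
nb_jobs and the src/dst/len arrays are placeholders) of how the capacity
can be checked before enqueueing a burst:

	uint16_t i, room = rte_dma_burst_capacity(dev_id, vchan);

	/* Only enqueue as many copies as the descriptor ring can hold. */
	if (room >= nb_jobs) {
		for (i = 0; i < nb_jobs; i++)
			rte_dma_copy(dev_id, vchan, src[i], dst[i], len[i],
				i == nb_jobs - 1 ? RTE_DMA_OP_FLAG_SUBMIT : 0);
	}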

Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 34 +++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h |  3 +--
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 0a91cf040a..bb6b54e583 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -423,7 +423,6 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
 				 enum rte_dma_status_code *status)
 {
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
@@ -457,7 +456,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
 			fsl_status->virt_head = fsl_status->cq;
 		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -607,7 +605,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
 	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
 	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
 	fsl_queue->virt_head++;
 
 	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
@@ -623,7 +620,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	} else {
 		fsl_queue->pending++;
 	}
-	return fsl_comp->index;
+	return 0;
 }
 
 static int
@@ -771,8 +768,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
 	struct fsl_qdma_chan *fsl_chan =
 		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	int ret;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	int ret, idx;
 
+	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
 	void *fsl_comp = NULL;
 
 	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
@@ -783,8 +782,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		return -1;
 	}
 	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	return idx;
 }
 
 static uint16_t
@@ -826,8 +827,10 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
+						st);
 	fsl_queue->stats.completed += intr;
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
 	return intr;
 }
@@ -873,9 +876,10 @@ dpaa_qdma_dequeue(void *dev_private,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
+						NULL);
 	fsl_queue->stats.completed += intr;
-
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 	return intr;
 }
 
@@ -912,6 +916,17 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+	return fsl_queue->n_cq - fsl_queue->pending;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1035,6 +1050,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..2092fb39f5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -176,7 +176,6 @@ struct fsl_qdma_comp {
 	dma_addr_t		bus_addr;
 	dma_addr_t		desc_bus_addr;
 	void			*virt_addr;
-	int			index;
 	void			*desc_virt_addr;
 	struct fsl_qdma_chan	*qchan;
 	dma_call_back		call_back_func;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 16/30] dma/dpaa: add workaround for ERR050757
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (14 preceding siblings ...)
  2024-07-22 11:58   ` [v2 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                     ` (14 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes. The completion buffer inside the
controller needs to be at least 4 KB, but the PCIe controller has
only 3 KB of buffer. If the size of the pending outbound read
transactions exceeds 3 KB, the PCIe controller may drop the incoming
completions without notifying the initiator of the transaction,
leaving the transactions unfinished. All subsequent outbound reads
to PCIe are then blocked permanently.
To avoid a qDMA hang while it keeps waiting for data that was
silently dropped, set stride mode for qDMA.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 18 ++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h |  5 +++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index bb6b54e583..a21279293c 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -159,6 +159,10 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* Note: command table (fsl_comp->virt_addr) is getting filled
 	 * directly in cmd descriptors of queues while enqueuing the descriptor
@@ -171,6 +175,20 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
 	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+
 	/* Status notification is enqueued to status queue. */
 	qdma_desc_addr_set64(csgf_src, src);
 	qdma_csgf_set_len(csgf_src, len);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 2092fb39f5..361f88856b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -81,6 +81,11 @@
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
+#define FSL_QDMA_CMD_SSEN		BIT(19)
+#define FSL_QDMA_CFG_SSS_OFFSET		12
+#define FSL_QDMA_CMD_SSS_STRIDE		128
+#define FSL_QDMA_CMD_SSS_DISTANCE	128
+
 #define QDMA_CCDF_STATUS		20
 #define QDMA_CCDF_OFFSET		20
 #define QDMA_CCDF_MASK			GENMASK(28, 20)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 17/30] dma/dpaa: qdma stall workaround for ERR050265
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (15 preceding siblings ...)
  2024-07-22 11:58   ` [v2 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
                     ` (13 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers in the
form of fewer transactions and should be used whenever possible.
This patch also fixes a QDMA stall issue caused by unaligned
transactions.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 6 ++++++
 drivers/dma/dpaa/dpaa_qdma.h | 1 +
 4 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a21279293c..f1878879af 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -179,6 +179,9 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
 		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
 		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
@@ -247,6 +250,9 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 361f88856b..8cb4042bd0 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -80,6 +80,7 @@
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_PF			BIT(17)
 
 #define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 18/30] dma/dpaa: remove unwanted desc
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (16 preceding siblings ...)
  2024-07-22 11:58   ` [v2 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 19/30] dma/dpaa: data path optimization Gagandeep Singh
                     ` (12 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove unwanted descriptor list maintenance
and channel overhead.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 594 +++++++++++++----------------------
 drivers/dma/dpaa/dpaa_qdma.h |  43 +--
 2 files changed, 221 insertions(+), 416 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1878879af..8e8426b88d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -111,96 +111,6 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
-#endif
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-				FSL_QDMA_CMD_RWTTYPE_OFFSET);
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
-#endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
-#endif
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
@@ -209,42 +119,41 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 					struct fsl_qdma_queue *queue,
 					int size, int aligned)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_format *ccdf;
+	int i, j;
+	struct fsl_qdma_format *head;
+
+	head = queue->virt_head;
+
+	for (i = 0; i < (int)(queue->n_cq); i++) {
+		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+
+		queue->virt_addr[i] =
+		dma_pool_alloc(size, aligned, &bus_addr);
+		if (!queue->virt_addr[i])
 			goto fail;
-		}
 
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		queue->desc_virt_addr[i] =
+		dma_pool_alloc(size, aligned, &desc_bus_addr);
+		if (!queue->desc_virt_addr[i]) {
+			rte_free(queue->virt_addr[i]);
 			goto fail;
 		}
 
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
+		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+		memset(queue->desc_virt_addr[i], 0,
 		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
 
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
+			    QDMA_DESC_OFF;
+		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
+		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+
 		/* It must be 32 as Compound S/G Descriptor */
 		qdma_csgf_set_len(csgf_desc, 32);
 		/* Descriptor Buffer */
@@ -258,106 +167,84 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		ccdf = (struct fsl_qdma_format *)queue->virt_head;
+		qdma_desc_addr_set64(ccdf, bus_addr + 16);
+		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
+		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
+		queue->virt_head++;
 	}
+	queue->virt_head = head;
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
+	for (j = 0; j < i; j++) {
+		rte_free(queue->virt_addr[j]);
+		rte_free(queue->desc_virt_addr[j]);
 	}
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static struct fsl_qdma_queue
+*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *queue_temp;
+
+	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	if (!queue_temp) {
+		printf("no memory to allocate queues\n");
+		return NULL;
 	}
 
-	return NULL;
-}
+	queue_temp->cq =
+	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE,
+		       sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
+	if (!queue_temp->cq) {
+		rte_free(queue_temp);
 		return NULL;
-
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
-
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.\n");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
 	}
-	return queue_head;
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
+	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
+	       sizeof(struct fsl_qdma_format));
+
+	queue_temp->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->n_cq = QDMA_QUEUE_SIZE;
+	queue_temp->id = k;
+	queue_temp->pending = 0;
+	queue_temp->virt_head = queue_temp->cq;
+	queue_temp->virt_addr = rte_malloc("queue virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->virt_addr) {
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
 	}
-	rte_free(queue_head);
+	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->desc_virt_addr) {
+		rte_free(queue_temp->virt_addr);
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
+	}
+	queue_temp->stats = (struct rte_dma_stats){0};
+
+	return queue_temp;
+}
 
-	return NULL;
+static void
+fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->desc_virt_addr);
+	rte_free(queue->virt_addr);
+	rte_free(queue->cq);
+	rte_free(queue);
 }
 
 static struct
@@ -367,11 +254,6 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	unsigned int status_size;
 
 	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.\n");
-		return NULL;
-	}
 
 	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
 	if (!status_head)
@@ -399,6 +281,13 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	return status_head;
 }
 
+static void
+fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
+{
+	rte_free(status->cq);
+	rte_free(status);
+}
+
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -449,12 +338,9 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
 	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
+	u32 reg;
 	int count = 0;
 
 	while (count < nb_cpls) {
@@ -464,14 +350,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 
 		status_addr = fsl_status->virt_head;
 
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
 		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
 		reg |= FSL_QDMA_BSQMR_DI_BE;
 
@@ -483,7 +361,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
 		count++;
 
 	}
@@ -493,7 +370,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
@@ -508,11 +384,13 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
+	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		k = 0;
+		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
+			temp = fsl_qdma->queue[i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -522,19 +400,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(i));
+				    block + FSL_QDMA_BCQDPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(i));
+				    block + FSL_QDMA_BCQEDPA_SADDR(k));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEEPA_SADDR(k));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
 			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			k++;
 		}
 
 		/*
@@ -585,36 +464,19 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
-{
-	struct fsl_qdma_comp *fsl_comp;
-
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
-
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
-
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
-}
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
+				  uint64_t flags, dma_addr_t dst,
+				  dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
+	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_format *csgf_src, *csgf_dest;
 	u32 reg;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
@@ -624,17 +486,40 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
 		return -1;
 
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
+	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		   QDMA_SGF_SRC_OFF;
+	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		    QDMA_SGF_DST_OFF;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
 	fsl_queue->virt_head++;
+	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
 		fsl_queue->virt_head = fsl_queue->cq;
+		fsl_queue->ci = 0;
+	}
 
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
@@ -647,34 +532,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	return 0;
 }
 
-static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	int ret;
-
-	if (fsl_queue->count++)
-		goto finally;
-
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
-
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor\n");
-		goto exit;
-	}
-
-finally:
-	fsl_qdma->desc_allocated++;
-	return 0;
-exit:
-	return -ENOMEM;
-}
-
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	      uint32_t info_sz)
@@ -701,35 +558,26 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
 {
-	u32 i, start, end;
+	u32 i;
 	int ret;
+	struct fsl_qdma_queue *fsl_queue;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
-
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
-
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
-		}
+	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
+		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
+		return -1;
 	}
 
-	return -1;
-}
+	i = fsl_qdma->free_block_id * QDMA_QUEUES;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	fsl_queue = fsl_qdma->queue[i];
+	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
+			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	if (ret)
+		return ret;
+
+	fsl_qdma->vchan_map[vchan] = i;
+	fsl_qdma->free_block_id++;
+	return 0;
 }
 
 static int
@@ -767,10 +615,9 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *block = fsl_queue->queue_base;
 	u32 reg;
 
 	while (fsl_queue->pending) {
@@ -790,22 +637,13 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		  uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	int ret, idx;
 
 	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
-	void *fsl_comp = NULL;
 
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
-		return -1;
-	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
 	if (ret < 0)
 		return ret;
 
@@ -822,9 +660,8 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -870,9 +707,8 @@ dpaa_qdma_dequeue(void *dev_private,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -912,9 +748,8 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
 		    struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -931,9 +766,8 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	fsl_queue->stats = (struct rte_dma_stats){0};
 
@@ -944,9 +778,8 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -965,43 +798,21 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	u32 i, k = 0;
+	int j;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return -1;
 	}
 
 	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
@@ -1014,39 +825,55 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (fsl_qdma->ctrl_base == MAP_FAILED) {
 		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
 		       "size %d\n", phys_addr, regs_size);
-		goto err;
+		return -1;
 	}
 
 	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
 	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
 
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
+	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
+	if (!fsl_qdma->status)
+		goto err;
+
+	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
 	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
+		rte_free(fsl_qdma->status);
 		goto err;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		if (!fsl_qdma->status[i])
+			goto mem_free;
+		j = 0;
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
+			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k])
+				goto mem_free;
+			j++;
+		}
 
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		rte_free(fsl_qdma->status);
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	}
 	rte_free(fsl_qdma->status);
+err:
+	rte_free(fsl_qdma->queue);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return -1;
 }
@@ -1092,17 +919,16 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, k;
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
 	}
 
+	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8cb4042bd0..80366ce890 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,6 +107,9 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_SGF_SRC_OFF		2
+#define QDMA_SGF_DST_OFF		3
+#define QDMA_DESC_OFF			1
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -157,55 +160,31 @@ struct fsl_qdma_ddf {
 	__le32 cmd;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
-};
-
 struct fsl_qdma_queue {
 	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
+	void                    **virt_addr;
+	u8			ci;
+	u8			n_cq;
+	u8			id;
+	void			*queue_base;
 	struct fsl_qdma_format	*cq;
-	void			*block_base;
 	struct rte_dma_stats	stats;
-};
-
-struct fsl_qdma_comp {
+	u8			pending;
 	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+	void			**desc_virt_addr;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
 	void			*ctrl_base;
 	void			*status_base;
 	void			*block_base;
-	u32			n_chans;
 	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	**queue;
 	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
 	u32			num_blocks;
 	u8			free_block_id;
 	u32			vchan_map[4];
 	int			block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 19/30] dma/dpaa: data path optimization
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (17 preceding siblings ...)
  2024-07-22 11:58   ` [v2 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 20/30] dma/dpaa: refactor driver Gagandeep Singh
                     ` (11 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove unnecessary status read before every send.
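
Illustrative sketch only (not part of the patch): the optimization replaces a
read-modify-write of the per-queue BCQMR doorbell with a single write of a
precomputed value, so the enqueue fast path no longer reads hardware state.
Only FSL_QDMA_BCQMR_EI (0x20c0) is taken from the patch below; the offset
macro and MMIO helper here are stand-ins.

	#include <stdint.h>

	/* Value from the patch; it encodes EN | CD_THLD | CQ_SIZE for the
	 * current QDMA_QUEUE_SIZE and must be updated if that size changes.
	 */
	#define FSL_QDMA_BCQMR_EI	0x20c0

	/* Hypothetical per-queue mode-register offset. */
	#define BCQMR_OFFSET(q)		(0xc0 + 0x100 * (q))

	static inline void writel_be(volatile void *addr, uint32_t val)
	{
		/* Stand-in for the driver's big-endian MMIO write helper
		 * (assumes a little-endian host).
		 */
		*(volatile uint32_t *)addr = __builtin_bswap32(val);
	}

	/* Old fast path: readl_be() + OR in the enqueue bit + writel_be().
	 * New fast path: one precomputed write, no MMIO read.
	 */
	static void qdma_ring_doorbell(volatile uint8_t *block, int queue_id)
	{
		writel_be(block + BCQMR_OFFSET(queue_id), FSL_QDMA_BCQMR_EI);
	}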

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 186 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 ++
 2 files changed, 101 insertions(+), 92 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8e8426b88d..4022ad6469 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -248,7 +248,8 @@ fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 }
 
 static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+					   u32 id)
 {
 	struct fsl_qdma_queue *status_head;
 	unsigned int status_size;
@@ -277,6 +278,8 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	       sizeof(struct fsl_qdma_format));
 	status_head->n_cq = status_size;
 	status_head->virt_head = status_head->cq;
+	status_head->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	return status_head;
 }
@@ -334,12 +337,9 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
+fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_format *status_addr;
 	u32 reg;
 	int count = 0;
 
@@ -348,16 +348,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
 			return count;
 
-		status_addr = fsl_status->virt_head;
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -472,19 +463,37 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 {
 	void *block = fsl_queue->queue_base;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
-	u32 reg;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 	u32 cfg = 0;
 #endif
 
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	u32 reg;
+
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
+	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
+		DPAA_QDMA_ERR("QDMA Engine is busy\n");
 		return -1;
+	}
+#else
+	/* check whether critical watermark level reached,
+	 * below check is valid for only single queue per block
+	 */
+	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
+			>= QDMA_QUEUE_CR_WM) {
+		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+		return -1;
+	}
+#endif
+	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
+		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
+		return -1;
+	}
 
 	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
 		   QDMA_SGF_SRC_OFF;
@@ -512,19 +521,14 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 	qdma_csgf_set_len(csgf_dest, len);
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->virt_head++;
 	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
-		fsl_queue->virt_head = fsl_queue->cq;
+	if (fsl_queue->ci == fsl_queue->n_cq)
 		fsl_queue->ci = 0;
-	}
-
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI,
+			       block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
@@ -618,12 +622,9 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	void *block = fsl_queue->queue_base;
-	u32 reg;
 
 	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -656,44 +657,43 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			 enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *status = fsl_qdma->status_base;
+	int intr;
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, st);
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+#endif
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			fsl_queue->stats.errors++;
+		}
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						st);
-	fsl_queue->stats.completed += intr;
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
-	return intr;
+	return ret;
 }
 
 
@@ -703,44 +703,46 @@ dpaa_qdma_dequeue(void *dev_private,
 		  uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	void *status = fsl_qdma->status_base;
+	int intr;
+#endif
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
+	*has_error = false;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, NULL);
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			intr = qdma_readl(status + FSL_QDMA_DEDR);
+			*has_error = true;
+			fsl_queue->stats.errors++;
+		}
 	}
-
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						NULL);
-	fsl_queue->stats.completed += intr;
+#endif
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
-	return intr;
+	return ret;
 }
 
 static int
@@ -842,7 +844,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	}
 
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
 		if (!fsl_qdma->status[i])
 			goto mem_free;
 		j = 0;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 80366ce890..8a4517a70a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -58,11 +58,17 @@
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
+/* Update the value appropriately whenever QDMA_QUEUE_SIZE
+ * changes.
+ */
+#define FSL_QDMA_BCQMR_EI		0x20c0
+
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
 #define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -110,6 +116,7 @@
 #define QDMA_SGF_SRC_OFF		2
 #define QDMA_SGF_DST_OFF		3
 #define QDMA_DESC_OFF			1
+#define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 20/30] dma/dpaa: refactor driver
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (18 preceding siblings ...)
  2024-07-22 11:58   ` [v2 19/30] dma/dpaa: data path optimization Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
                     ` (10 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Return the completion index instead of the total completion
counter in the completion callback.
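
A simplified sketch of the index bookkeeping this refactor moves to, under
the assumption of a ring of n_cq slots with a producer index ci and a
completion counter (names are illustrative, not the driver's): enqueue
reports the slot it just consumed, and the completion path reports the index
of the last finished slot rather than a running total.

	#include <stdint.h>

	struct ring_idx {
		uint16_t ci;        /* next slot the producer will fill */
		uint16_t complete;  /* next slot expected to complete */
		uint16_t n_cq;      /* ring size */
	};

	/* Enqueue: return the index of the slot just filled, wrapping at n_cq. */
	static uint16_t ring_enqueue_idx(struct ring_idx *r)
	{
		uint16_t idx = r->ci;

		r->ci = (r->ci + 1 == r->n_cq) ? 0 : r->ci + 1;
		return idx;
	}

	/* Completion: advance the counter and return the last completed index. */
	static uint16_t ring_last_completed(struct ring_idx *r)
	{
		r->complete = (r->complete + 1 == r->n_cq) ? 0 : r->complete + 1;
		return r->complete ? r->complete - 1 : r->n_cq - 1;
	}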

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 534 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h | 109 +++----
 2 files changed, 330 insertions(+), 313 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 4022ad6469..dc17aa4520 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -15,19 +15,6 @@ qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
-{
-	return ccdf->cfg8b_w1 & 0xff;
-}
-
-static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
-
 static inline void
 qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
 {
@@ -59,8 +46,7 @@ qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
 	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
 }
 
-static inline int
-ilog2(int x)
+static inline int ilog2(int x)
 {
 	int log = 0;
 
@@ -73,32 +59,50 @@ ilog2(int x)
 	return log;
 }
 
-static u32
+static inline int ilog2_qsize(uint32_t q_size)
+{
+	return (ilog2(q_size) - ilog2(64));
+}
+
+static inline int ilog2_qthld(uint32_t q_thld)
+{
+	return (ilog2(q_thld) - ilog2(16));
+}
+
+static inline int
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
+{
+	struct rte_dma_stats *stats = &fsl_queue->stats;
+
+	return (stats->submitted - stats->completed);
+}
+
+static uint32_t
 qdma_readl(void *addr)
 {
 	return QDMA_IN(addr);
 }
 
 static void
-qdma_writel(u32 val, void *addr)
+qdma_writel(uint32_t val, void *addr)
 {
 	QDMA_OUT(addr, val);
 }
 
-static u32
+static uint32_t
 qdma_readl_be(void *addr)
 {
 	return QDMA_IN_BE(addr);
 }
 
 static void
-qdma_writel_be(u32 val, void *addr)
+qdma_writel_be(uint32_t val, void *addr)
 {
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
@@ -115,47 +119,48 @@ static void
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
 	struct fsl_qdma_format *ccdf;
-	int i, j;
+	uint16_t i, j;
 	struct fsl_qdma_format *head;
+	struct fsl_qdma_cmpd_ft *ft;
+	struct fsl_qdma_df *df;
 
-	head = queue->virt_head;
+	head = queue->cmd_desc;
 
-	for (i = 0; i < (int)(queue->n_cq); i++) {
-		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->virt_addr[i] =
-		dma_pool_alloc(size, aligned, &bus_addr);
-		if (!queue->virt_addr[i])
+		queue->ft[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
+				RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
 			goto fail;
 
-		queue->desc_virt_addr[i] =
-		dma_pool_alloc(size, aligned, &desc_bus_addr);
-		if (!queue->desc_virt_addr[i]) {
-			rte_free(queue->virt_addr[i]);
+		queue->df[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_df),
+				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (!queue->df[i]) {
+			rte_free(queue->ft[i]);
 			goto fail;
 		}
 
-		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(queue->desc_virt_addr[i], 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
+		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
 
-		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
-			    QDMA_DESC_OFF;
-		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
-		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
+		ft = queue->ft[i];
+		df = queue->df[i];
+		sdf = &df->sdf;
+		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
 
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		qdma_csgf_set_len(&ft->desc_buf,
+			sizeof(struct fsl_qdma_df));
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
@@ -167,73 +172,72 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		ccdf = (struct fsl_qdma_format *)queue->virt_head;
-		qdma_desc_addr_set64(ccdf, bus_addr + 16);
-		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
-		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
-		queue->virt_head++;
+		ccdf = queue->cmd_desc;
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		qdma_ccdf_set_format(ccdf, 0);
+		qdma_ccdf_set_ser(ccdf, 0);
+		queue->cmd_desc++;
 	}
-	queue->virt_head = head;
+	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
 
 fail:
 	for (j = 0; j < i; j++) {
-		rte_free(queue->virt_addr[j]);
-		rte_free(queue->desc_virt_addr[j]);
+		rte_free(queue->ft[j]);
+		rte_free(queue->df[j]);
 	}
 
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
+static struct fsl_qdma_queue *
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
 	struct fsl_qdma_queue *queue_temp;
+	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	queue_temp = rte_zmalloc("qdma: queue head",
+		sizeof(struct fsl_qdma_queue), 0);
 	if (!queue_temp) {
-		printf("no memory to allocate queues\n");
+		DPAA_QDMA_ERR("no memory to allocate queues\n");
 		return NULL;
 	}
+	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq =
-	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE,
-		       sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
-
+	queue_temp->cq = dma_pool_alloc(queue_size,
+		queue_size, &queue_temp->bus_addr);
 	if (!queue_temp->cq) {
 		rte_free(queue_temp);
 		return NULL;
 	}
 
-	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
-	       sizeof(struct fsl_qdma_format));
+	memset(queue_temp->cq, 0x0, queue_size);
 
-	queue_temp->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->id = k;
+	queue_temp->queue_id = queue_id;
 	queue_temp->pending = 0;
-	queue_temp->virt_head = queue_temp->cq;
-	queue_temp->virt_addr = rte_malloc("queue virt addr",
+	queue_temp->cmd_desc = queue_temp->cq;
+	queue_temp->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->virt_addr) {
+	if (!queue_temp->ft) {
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+	queue_temp->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->desc_virt_addr) {
-		rte_free(queue_temp->virt_addr);
+	if (!queue_temp->df) {
+		rte_free(queue_temp->ft);
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->stats = (struct rte_dma_stats){0};
+	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
 
 	return queue_temp;
 }
@@ -241,45 +245,43 @@ static struct fsl_qdma_queue
 static void
 fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->desc_virt_addr);
-	rte_free(queue->virt_addr);
+	rte_free(queue->df);
+	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
-					   u32 id)
+static struct fsl_qdma_queue *
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
 	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
+	status_head = rte_zmalloc("qdma: status head",
+		sizeof(*status_head), 0);
 	if (!status_head)
 		return NULL;
 
 	/*
 	 * Buffer for queue command
 	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
+	status_head->cq = dma_pool_alloc(status_size,
+		status_size, &status_head->bus_addr);
 
 	if (!status_head->cq) {
 		rte_free(status_head);
 		return NULL;
 	}
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
+	memset(status_head->cq, 0x0, status_size);
 	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
-	status_head->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+	status_head->cmd_desc = status_head->cq;
+	status_head->block_id = block_id;
+	status_head->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
 	return status_head;
 }
@@ -294,11 +296,11 @@ fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
-	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
+	uint8_t *ctrl = fsl_qdma->ctrl_base;
+	uint8_t *block;
 	int i, count = RETRIES;
 	unsigned int j;
-	u32 reg;
+	uint32_t reg;
 
 	/* Disable the command queue and wait for idle state. */
 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
@@ -337,11 +339,13 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
-				 enum rte_dma_status_code *status)
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls,
+	enum rte_dma_status_code *status)
 {
-	u32 reg;
+	uint32_t reg;
 	int count = 0;
+	uint8_t *block = fsl_queue->block_vir;
 
 	while (count < nb_cpls) {
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
@@ -351,9 +355,11 @@ fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
+		fsl_queue->complete++;
+		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
+			fsl_queue->complete = 0;
 
 		count++;
-
 	}
 	return count;
 }
@@ -363,9 +369,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
-	u32 i, j;
-	u32 reg;
+	uint8_t *block;
+	uint32_t i, j, k = 0;
+	uint32_t reg;
 	int ret, val;
 
 	/* Try to halt the qDMA engine first. */
@@ -375,13 +381,11 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
-	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		k = 0;
-		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
-			temp = fsl_qdma->queue[i];
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = fsl_qdma->queue[k];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -391,19 +395,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(k));
+				    block + FSL_QDMA_BCQDPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(k));
+				    block + FSL_QDMA_BCQEDPA_SADDR(i));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEEPA_SADDR(i));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 			k++;
 		}
 
@@ -423,18 +428,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp = fsl_qdma->status[j];
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Disable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -443,7 +445,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -455,52 +457,51 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-
 static int
 fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-				  uint64_t flags, dma_addr_t dst,
-				  dma_addr_t src, size_t len)
+	uint64_t flags, dma_addr_t dst,
+	dma_addr_t src, size_t len)
 {
-	void *block = fsl_queue->queue_base;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
+	uint32_t cfg = 0;
 #endif
 
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	u32 reg;
+	uint32_t reg;
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
 		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -1;
+		return -EBUSY;
 	}
 #else
 	/* check whether critical watermark level reached,
 	 * below check is valid for only single queue per block
 	 */
-	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
-			>= QDMA_QUEUE_CR_WM) {
+	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
 		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
-		return -1;
+		return -ENOSPC;
 	}
 #endif
+
 	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
 		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -1;
+		return -ENOSPC;
 	}
 
-	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		   QDMA_SGF_SRC_OFF;
-	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		    QDMA_SGF_DST_OFF;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf = fsl_queue->df[fsl_queue->ci];
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			FSL_QDMA_CMD_RWTTYPE_OFFSET);
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
@@ -527,67 +528,57 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->ci = 0;
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI,
-			       block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
 	}
-	return 0;
+
+	if (fsl_queue->ci)
+		return fsl_queue->ci - 1;
+	else
+		return fsl_queue->n_cq;
 }
 
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+	__rte_unused uint32_t info_sz)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 4;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
 {
-	u32 i;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue;
-
-	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
-		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
-		return -1;
-	}
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	i = fsl_qdma->free_block_id * QDMA_QUEUES;
+	if (fsl_queue->used)
+		return 0;
 
-	fsl_queue = fsl_qdma->queue[i];
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
 	if (ret)
 		return ret;
 
-	fsl_qdma->vchan_map[vchan] = i;
-	fsl_qdma->free_block_id++;
+	fsl_queue->used = 1;
+
 	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -606,9 +597,9 @@ dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
 
 static int
 dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
-		      uint16_t vchan,
-		      __rte_unused const struct rte_dma_vchan_conf *conf,
-		      __rte_unused uint32_t conf_sz)
+	uint16_t vchan,
+	__rte_unused const struct rte_dma_vchan_conf *conf,
+	__rte_unused uint32_t conf_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
 
@@ -618,13 +609,13 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -634,37 +625,31 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	int ret, idx;
-
-	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
-	if (ret < 0)
-		return ret;
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
 
-	return idx;
+	return ret;
 }
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, st);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, st);
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -690,8 +675,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	}
 
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
 
 	return ret;
 }
@@ -699,21 +688,20 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, NULL);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, NULL);
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -740,23 +728,27 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 #endif
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
+
 	return ret;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
 		return -EINVAL;
-	if (rte_stats == NULL)
+	if (!rte_stats)
 		return -EINVAL;
 
 	*rte_stats = *stats;
@@ -768,10 +760,9 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
@@ -779,9 +770,8 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
-	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -804,8 +794,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i, k = 0;
-	int j;
+	uint32_t i, j, k;
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
@@ -814,47 +803,63 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		return -1;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d\n", phys_addr, regs_size);
-		return -1;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIu64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
-	if (!fsl_qdma->status)
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	fsl_qdma->status = rte_malloc("status queue",
+		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
+	if (!fsl_qdma->status) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
-	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
+	fsl_qdma->queue = rte_malloc("cmd queue",
+		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
 	if (!fsl_qdma->queue) {
 		rte_free(fsl_qdma->status);
+		ret = -ENOMEM;
 		goto err;
 	}
 
+	k = 0;
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i])
+		fsl_qdma->status[i] =
+			fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (!fsl_qdma->status[i]) {
+			ret = -ENOMEM;
 			goto mem_free;
+		}
 		j = 0;
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
-			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k])
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_qdma->queue[k] =
+				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k]) {
+				ret = -ENOMEM;
 				goto mem_free;
-			j++;
+			}
+			k++;
 		}
-
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
@@ -867,17 +872,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	return 0;
 
 mem_free:
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
-	rte_free(fsl_qdma->status);
+	for (j = 0; j < k; j++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+
+	for (j = 0; j < i; j++)
+		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+
+	if (fsl_qdma->status)
+		rte_free(fsl_qdma->status);
 err:
-	rte_free(fsl_qdma->queue);
+	if (fsl_qdma->queue)
+		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
-	return -1;
+	return ret;
 }
 
 static int
@@ -921,13 +929,13 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i, k;
+	uint32_t i;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
 		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
+
+	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
 
 	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8a4517a70a..25954ef3a4 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,12 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,19 +59,18 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 /* Update the value appropriately whenever QDMA_QUEUE_SIZE
  * changes.
  */
-#define FSL_QDMA_BCQMR_EI		0x20c0
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
 #define FSL_QDMA_BSQMR_DI		0xc0
 
@@ -75,8 +79,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -106,16 +108,11 @@
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
+#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
 #define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
 #define QDMA_BLOCK_OFFSET		0x10000
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
-#define QDMA_SGF_SRC_OFF		2
-#define QDMA_SGF_DST_OFF		3
-#define QDMA_DESC_OFF			1
 #define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
@@ -134,64 +131,76 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
 struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
+	uint32_t status; /* ser, status */
+	uint32_t cfg;	/* format, offset */
 	union {
 		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
+			uint32_t addr_lo; /* low 32-bits of 40-bit address */
+			uint8_t addr_hi; /* high 8-bits of 40-bit address */
+			uint8_t __reserved1[2];
+			uint8_t cfg8b_w1; /* dd, queue */
 		};
-		__le64 data;
+		uint64_t data;
 	};
 };
 
 /* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
+	uint32_t rev3;
+	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	uint32_t rev5;
+	uint32_t cmd;
 };
 
 /* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rev1;
+	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	uint32_t rev3;
+	uint32_t cmd;
+};
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
+};
+
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_format desc_buf;
+	struct fsl_qdma_format desc_sbuf;
+	struct fsl_qdma_format desc_dbuf;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	void                    **virt_addr;
-	u8			ci;
-	u8			n_cq;
-	u8			id;
-	void			*queue_base;
-	struct fsl_qdma_format	*cq;
-	struct rte_dma_stats	stats;
-	u8			pending;
-	dma_addr_t		bus_addr;
-	void			**desc_virt_addr;
+	struct fsl_qdma_format *cmd_desc;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	uint16_t complete;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	uint8_t pending;
+	dma_addr_t bus_addr;
+	struct fsl_qdma_df **df;
 };
 
 struct fsl_qdma_engine {
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_queues;
-	struct fsl_qdma_queue	**queue;
-	struct fsl_qdma_queue	**status;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	struct fsl_qdma_queue **queue;
+	struct fsl_qdma_queue **status;
+	uint32_t num_blocks;
+	int block_offset;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 21/30] dma/dpaa: dequeue status queue
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (19 preceding siblings ...)
  2024-07-22 11:58   ` [v2 20/30] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
                     ` (9 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

To support multiple command queues per block, the status queue needs
to report which command queue of the block has completed.

The command queues are distributed across the blocks during setup.
If multiple command queues are enabled in one block, the status queue
is dequeued instead of polling each command queue for completion.

The dequeue operation is not performed in silent mode.
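
The following is a minimal sketch (not driver code) of the
block-level dequeue idea this patch introduces: every status-queue
entry written back by hardware carries the id of the command queue
that finished, so one pass over the status ring can credit
completions to several command queues of the block. The struct and
function names below (cmd_queue, stat_queue, block_dequeue) are
simplified stand-ins for the driver's fsl_qdma_queue,
fsl_qdma_status_queue and dpaa_qdma_block_dequeue(), and the layout
of the entries is reduced to the two fields the loop actually uses.

#include <stdint.h>

#define N_QUEUES 8

struct cmd_queue {
	uint16_t complete;	/* completion index, wraps at n_cq */
	uint16_t n_cq;
	uint64_t completed;	/* completion counter (stats) */
};

struct stat_entry {
	uint64_t addr;		/* non-zero while the entry is valid */
	uint8_t queue_id;	/* command queue that finished */
};

struct stat_queue {
	struct stat_entry *cq;	/* status ring shared with hardware */
	uint16_t complete;	/* next entry to consume */
	uint16_t n_cq;
};

static uint16_t
block_dequeue(struct stat_queue *sq, struct cmd_queue cmdq[N_QUEUES])
{
	uint16_t start = sq->complete, count = 0;

	while (sq->cq[start].addr) {
		struct cmd_queue *cq = &cmdq[sq->cq[start].queue_id];

		/* Hand the status slot back and credit the owner queue. */
		sq->cq[start].addr = 0;
		cq->completed++;
		cq->complete++;
		if (cq->complete == cq->n_cq)
			cq->complete = 0;
		start++;
		if (start == sq->n_cq)
			start = 0;
		count++;
	}
	sq->complete = start;

	return count;
}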

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 351 +++++++++++++++++++++--------------
 drivers/dma/dpaa/dpaa_qdma.h |  38 +++-
 2 files changed, 242 insertions(+), 147 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index dc17aa4520..825dead5cf 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -34,6 +34,30 @@ qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
 	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
 }
 
+static inline void
+qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
+	uint8_t queue_idx)
+{
+	ccdf->queue = queue_idx;
+}
+
+static inline int
+qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+	uint8_t *queue_idx)
+{
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
+
+	return false;
+}
+
 static inline void
 qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
 {
@@ -110,7 +134,8 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 	if (!virt_addr)
 		return NULL;
 
-	*phy_addr = rte_mem_virt2iova(virt_addr);
+	if (phy_addr)
+		*phy_addr = rte_mem_virt2iova(virt_addr);
 
 	return virt_addr;
 }
@@ -121,6 +146,7 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *ccdf;
@@ -175,7 +201,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = queue->cmd_desc;
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		qdma_ccdf_set_format(ccdf, 0);
-		qdma_ccdf_set_ser(ccdf, 0);
+		if (!fsl_qdma->is_silent)
+			qdma_ccdf_set_ser(ccdf, 0);
+		qdma_ccdf_set_queue(ccdf, queue->queue_id);
 		queue->cmd_desc++;
 	}
 	queue->cmd_desc = head;
@@ -192,105 +220,91 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue *
+static int
 fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head",
-		sizeof(struct fsl_qdma_queue), 0);
-	if (!queue_temp) {
-		DPAA_QDMA_ERR("no memory to allocate queues\n");
-		return NULL;
-	}
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
 	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq = dma_pool_alloc(queue_size,
-		queue_size, &queue_temp->bus_addr);
-	if (!queue_temp->cq) {
-		rte_free(queue_temp);
-		return NULL;
-	}
+	cmd_queue->cq = dma_pool_alloc(queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq)
+		return -ENOMEM;
 
-	memset(queue_temp->cq, 0x0, queue_size);
+	memset(cmd_queue->cq, 0x0, queue_size);
 
-	queue_temp->block_vir = fsl_qdma->block_base +
+	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
-	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->queue_id = queue_id;
-	queue_temp->pending = 0;
-	queue_temp->cmd_desc = queue_temp->cq;
-	queue_temp->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending = 0;
+	cmd_queue->cmd_desc = cmd_queue->cq;
+	cmd_queue->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->ft) {
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->ft) {
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	queue_temp->df = rte_malloc("Descriptor Buffer",
+	cmd_queue->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->df) {
-		rte_free(queue_temp->ft);
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->df) {
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
 
-	return queue_temp;
+	return 0;
 }
 
 static void
-fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
-	rte_free(queue);
 }
 
-static struct fsl_qdma_queue *
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
+}
+
+
+static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
+	struct fsl_qdma_status_queue *status;
 	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head",
-		sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(status_size,
-		status_size, &status_head->bus_addr);
+	status->cq = dma_pool_alloc(status_size,
+		status_size, &status->bus_addr);
 
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size);
-	status_head->n_cq = status_size;
-	status_head->cmd_desc = status_head->cq;
-	status_head->block_id = block_id;
-	status_head->block_vir = fsl_qdma->block_base +
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
-}
-
-static void
-fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
-{
-	rte_free(status->cq);
-	rte_free(status);
+	return 0;
 }
 
 static int
@@ -358,6 +372,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->complete++;
 		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
 			fsl_queue->complete = 0;
+		fsl_queue->stats.completed++;
 
 		count++;
 	}
@@ -368,9 +383,10 @@ static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	uint8_t *block;
-	uint32_t i, j, k = 0;
+	uint32_t i, j;
 	uint32_t reg;
 	int ret, val;
 
@@ -385,7 +401,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
 		for (i = 0; i < QDMA_QUEUES; i++) {
-			temp = fsl_qdma->queue[k];
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -409,7 +425,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
 			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
-			k++;
 		}
 
 		/*
@@ -419,7 +434,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 */
 
 		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
-			    block + FSL_QDMA_SQCCMR);
+			block + FSL_QDMA_SQCCMR);
 
 		/*
 		 * Initialize status queue registers to point to the first
@@ -428,14 +443,14 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		temp = fsl_qdma->status[j];
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEEPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEPAR);
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEDPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
@@ -445,7 +460,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2_qsize(temp->n_cq);
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -560,8 +575,29 @@ static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 	uint16_t vchan)
 {
-	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
+		}
+	}
+
+queue_found:
+	if (!found)
+		return -ENXIO;
 
 	if (fsl_queue->used)
 		return 0;
@@ -571,15 +607,19 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 		return ret;
 
 	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
 
 	return 0;
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -610,7 +650,7 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
@@ -629,7 +669,7 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	int ret;
 
 	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
@@ -637,6 +677,42 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_format *cq;
+	uint16_t start, count = 0;
+	uint8_t qid;
+	int ret;
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	do {
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+			cmd_queue->complete++;
+			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
+				cmd_queue->complete = 0;
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
@@ -644,12 +720,22 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, st);
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+		return 0;
+	}
+
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, st);
+	}
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -674,7 +760,6 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		}
 	}
 
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -693,15 +778,26 @@ dpaa_qdma_dequeue(void *dev_private,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+
+		return 0;
+	}
+
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, NULL);
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, NULL);
+	}
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -727,7 +823,6 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 #endif
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -743,7 +838,7 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
 	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -760,7 +855,7 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
@@ -771,7 +866,7 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -827,37 +922,19 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	fsl_qdma->block_base =
 		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
 
-	fsl_qdma->status = rte_malloc("status queue",
-		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
-	if (!fsl_qdma->status) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	fsl_qdma->queue = rte_malloc("cmd queue",
-		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
-	if (!fsl_qdma->queue) {
-		rte_free(fsl_qdma->status);
-		ret = -ENOMEM;
-		goto err;
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
 	k = 0;
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] =
-			fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i]) {
-			ret = -ENOMEM;
-			goto mem_free;
-		}
-		j = 0;
-		for (j = 0; j < QDMA_QUEUES; j++) {
-			fsl_qdma->queue[k] =
-				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k]) {
-				ret = -ENOMEM;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
 				goto mem_free;
-			}
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
 			k++;
 		}
 	}
@@ -865,24 +942,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		rte_free(fsl_qdma->status);
 		goto mem_free;
 	}
 
 	return 0;
 
 mem_free:
-	for (j = 0; j < k; j++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	for (j = 0; j < i; j++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	if (fsl_qdma->status)
-		rte_free(fsl_qdma->status);
-err:
-	if (fsl_qdma->queue)
-		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return ret;
@@ -929,16 +1002,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i;
+	uint32_t i, j, regs_size;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	rte_free(fsl_qdma->queue);
-	rte_free(fsl_qdma->status);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
+
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 25954ef3a4..da2dbf36c9 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,13 +107,13 @@
 #define COMMAND_QUEUE_OVERFLOW		10
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
-#define QDMA_CCSR_BASE			0x8380000
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_QUEUE_CR_WM		32
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -140,7 +140,9 @@ struct fsl_qdma_format {
 			uint32_t addr_lo; /* low 32-bits of 40-bit address */
 			uint8_t addr_hi; /* high 8-bits of 40-bit address */
 			uint8_t __reserved1[2];
-			uint8_t cfg8b_w1; /* dd, queue */
+			uint8_t queue:3;
+			uint8_t rsv:3;
+			uint8_t dd:2;
 		};
 		uint64_t data;
 	};
@@ -182,6 +184,7 @@ struct fsl_qdma_queue {
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
+	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
 	struct fsl_qdma_format *cq;
@@ -189,6 +192,18 @@ struct fsl_qdma_queue {
 	uint8_t pending;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
+	void *engine;
+};
+
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
@@ -197,10 +212,13 @@ struct fsl_qdma_engine {
 	void *status_base;
 	void *block_base;
 	uint32_t n_queues;
-	struct fsl_qdma_queue **queue;
-	struct fsl_qdma_queue **status;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 22/30] dma/dpaa: add Scatter Gather support
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (20 preceding siblings ...)
  2024-07-22 11:58   ` [v2 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 23/30] dma/dpaa: block dequeue Gagandeep Singh
                     ` (8 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform an SG operation through the copy_sg callback of the DMA
library, or when the application enqueues a burst of jobs.
Perform a simple (single-descriptor) operation when the burst
contains only one job.
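
A minimal sketch (simplified, not the driver code) of the dispatch
decision described above: pending jobs accumulate per virtual channel
and are flushed either as one simple transfer or as one compound
scatter-gather frame. flush_pending(), submit_simple() and
submit_sg() are hypothetical stand-ins for dpaa_qdma_submit(),
fsl_qdma_enqueue_desc_single() and the compound SG path added in
this patch.

#include <stdint.h>
#include <stdio.h>

struct job {
	uint64_t src;
	uint64_t dst;
	uint32_t len;
};

struct vchan {
	struct job pending[64];
	uint16_t pending_num;
};

/* Stand-in for the single-descriptor enqueue path. */
static int
submit_simple(const struct job *j)
{
	printf("simple copy: %u bytes\n", (unsigned int)j->len);
	return 0;
}

/* Stand-in for building one compound SG frame from n pending jobs. */
static int
submit_sg(const struct job *jobs, uint16_t n)
{
	printf("SG frame with %u entries\n", (unsigned int)n);
	(void)jobs;
	return 0;
}

static int
flush_pending(struct vchan *vc)
{
	int ret;

	if (!vc->pending_num)
		return 0;
	if (vc->pending_num == 1)
		ret = submit_simple(&vc->pending[0]);
	else
		ret = submit_sg(vc->pending, vc->pending_num);
	if (!ret)
		vc->pending_num = 0;

	return ret;
}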

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 856 ++++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h | 184 +++++---
 2 files changed, 763 insertions(+), 277 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 825dead5cf..f1ad60d1f2 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,45 +4,31 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_pmd_dpaax_qdma.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static int s_data_validation;
+static int s_hw_err_check;
+static int s_sg_disable;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
-}
-
-static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
-
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
-}
-
-static inline void
-qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
-	uint8_t queue_idx)
-{
-	ccdf->queue = queue_idx;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
 	uint8_t *queue_idx)
 {
 	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
@@ -58,18 +44,6 @@ qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
 	return false;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
-}
-
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
-}
-
 static inline int ilog2(int x)
 {
 	int log = 0;
@@ -126,11 +100,11 @@ qdma_writel_be(uint32_t val, void *addr)
 }
 
 static void *
-dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -149,28 +123,46 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *ccdf;
+	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
-	struct fsl_qdma_format *head;
 	struct fsl_qdma_cmpd_ft *ft;
 	struct fsl_qdma_df *df;
 
-	head = queue->cmd_desc;
-
 	for (i = 0; i < queue->n_cq; i++) {
 		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->ft[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
-				RTE_CACHE_LINE_SIZE, &phy_ft);
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
 		if (!queue->ft[i])
 			goto fail;
-
-		queue->df[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_df),
-				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+
+		queue->df[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_df),
+			RTE_CACHE_LINE_SIZE, &phy_df);
 		if (!queue->df[i]) {
 			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
 
@@ -182,31 +174,25 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		sdf = &df->sdf;
 		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
-
+		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(&ft->desc_buf,
-			sizeof(struct fsl_qdma_df));
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+		sdf->prefetch = 1;
 #endif
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
 
-		ccdf = queue->cmd_desc;
+		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
-		qdma_ccdf_set_format(ccdf, 0);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
 		if (!fsl_qdma->is_silent)
-			qdma_ccdf_set_ser(ccdf, 0);
-		qdma_ccdf_set_queue(ccdf, queue->queue_id);
-		queue->cmd_desc++;
+			ccdf->ser = 1;
+		ccdf->queue = queue->queue_id;
 	}
-	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
@@ -226,40 +212,107 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
 
 	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
 	cmd_queue->engine = fsl_qdma;
 
-	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
 
-	cmd_queue->cq = dma_pool_alloc(queue_size,
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
 		queue_size, &cmd_queue->bus_addr);
-	if (!cmd_queue->cq)
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
 		return -ENOMEM;
-
-	memset(cmd_queue->cq, 0x0, queue_size);
+	}
 
 	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
 	cmd_queue->queue_id = queue_id;
 	cmd_queue->block_id = block_id;
-	cmd_queue->pending = 0;
-	cmd_queue->cmd_desc = cmd_queue->cq;
-	cmd_queue->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	cmd_queue->df = rte_malloc("Descriptor Buffer",
+	sprintf(nm, "Descriptor Buf_%d_%d",
+		block_id, queue_id);
+	cmd_queue->df = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->df) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->df);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+
 	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
 	return 0;
 }
@@ -270,6 +323,10 @@ fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
 }
 
 static void
@@ -289,9 +346,10 @@ fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	status = &fsl_qdma->stat_queues[block_id];
 	status->engine = fsl_qdma;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	status->cq = dma_pool_alloc(status_size,
+	status->cq = dma_pool_alloc(NULL, status_size,
 		status_size, &status->bus_addr);
 
 	if (!status->cq)
@@ -352,31 +410,116 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
+{
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+	if (likely(!s_data_validation))
+		return;
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRId64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!\r\n",
+					err_msg);
+				return;
+			}
+		}
+	}
+}
+
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls,
-	enum rte_dma_status_code *status)
+fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
 {
 	uint32_t reg;
-	int count = 0;
+	int count = 0, ret;
 	uint8_t *block = fsl_queue->block_vir;
+	uint16_t *dq_complete = NULL, drain_num = 0;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	while (count < nb_cpls) {
+	while (1) {
+		if (rte_ring_free_count(fsl_queue->complete_pool) <
+			(FSL_QDMA_SG_MAX_ENTRY * 2))
+			break;
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
+			break;
 
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-		fsl_queue->complete++;
-		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
-			fsl_queue->complete = 0;
+		ret = rte_ring_dequeue(fsl_queue->complete_burst,
+			(void **)&dq_complete);
+		if (ret)
+			DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
+			(void **)desc, *dq_complete, NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
+
+		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
+			(void **)desc, (*dq_complete), NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		drain_num += *dq_complete;
+		fsl_queue->complete_start =
+			(fsl_queue->complete_start + (*dq_complete)) &
+			(fsl_queue->pending_max - 1);
 		fsl_queue->stats.completed++;
 
 		count++;
 	}
-	return count;
+
+	return drain_num;
+}
+
+static int
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *status)
+{
+	int ret;
+	uint16_t dq_num = 0, i;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+
+	ret = fsl_qdma_queue_drain(fsl_queue);
+	if (ret < 0) {
+		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			ret);
+	}
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+		(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (status) {
+		for (i = 0; i < dq_num; i++)
+			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
+	}
+
+	return dq_num;
 }
 
 static int
@@ -473,87 +616,253 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-	uint64_t flags, dma_addr_t dst,
-	dma_addr_t src, size_t len)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start;
+	int ret;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
 	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	uint32_t cfg = 0;
-#endif
+	int ret;
 
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	uint32_t reg;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0, drain;
+	uint32_t reg, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
+	check_num = 0;
+overflow_check:
+	if (unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
-		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -EBUSY;
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	drain = fsl_qdma_queue_drain(fsl_queue);
+	if (drain <= 0) {
+		drain_num++;
+		if (drain_num > 100) {
+			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			return -ENOSPC;
+		}
+		goto drain_again;
 	}
-#else
-	/* check whether critical watermark level reached,
-	 * below check is valid for only single queue per block
-	 */
-	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
-		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+	check_num++;
+	if (check_num > 10) {
+		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
 		return -ENOSPC;
 	}
+	goto overflow_check;
+
+	return -ENOSPC;
+}
+
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint8_t *block = fsl_queue->block_vir, i;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len = 0;
+	uint8_t num = fsl_queue->pending_num;
+	uint16_t start = fsl_queue->pending_start, idx;
+	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
 #endif
 
-	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
-		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -ENOSPC;
-	}
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = fsl_queue->df[fsl_queue->ci];
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+	sdf->prefetch = 1;
 #endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
 #endif
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->ci++;
 
-	if (fsl_queue->ci == fsl_queue->n_cq)
-		fsl_queue->ci = 0;
+	if (num == 1) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+		if (fsl_queue->pending_desc[start].len >
+			FSL_QDMA_CMD_SSS_DISTANCE) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+		} else {
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+#endif
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (s_sg_disable) {
+		while (fsl_queue->pending_num > 0) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+			if (fsl_queue->pending_desc[start].len >
+				FSL_QDMA_CMD_SSS_DISTANCE) {
+				sdf->ssen = 1;
+				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+			} else {
+				sdf->sss = 0;
+				sdf->ssd = 0;
+			}
+#endif
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
 
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->stats.submitted++;
+		return 0;
+	}
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+	}
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
 	} else {
-		fsl_queue->pending++;
+		sdf->sss = 0;
+		sdf->ssd = 0;
 	}
+#endif
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-	if (fsl_queue->ci)
-		return fsl_queue->ci - 1;
-	else
-		return fsl_queue->n_cq;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num = 0;
+
+	return 0;
 }
 
 static int
@@ -564,8 +873,9 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
-	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
@@ -651,16 +961,11 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-	void *block = fsl_queue->block_vir;
 
-	while (fsl_queue->pending) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
@@ -670,9 +975,86 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
+	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
+
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
@@ -683,7 +1065,7 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_status_queue *stat_queue;
 	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	uint16_t start, count = 0;
 	uint8_t qid;
 	int ret;
@@ -697,9 +1079,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
 			cmd_queue->stats.completed++;
-			cmd_queue->complete++;
-			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
-				cmd_queue->complete = 0;
 			start++;
 			if (unlikely(start == stat_queue->n_cq))
 				start = 0;
@@ -713,16 +1092,81 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:\n");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)\n");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)\n");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)\n");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)\n");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)\n");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)\n");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)\n");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:\n");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x\n",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x\n",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d\n",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
-	int intr;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -734,55 +1178,27 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, st);
+				nb_cpls, last_idx, st);
 	}
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-#endif
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
 			fsl_queue->stats.errors++;
-		}
-	}
-
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
 	}
 
 	return ret;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
-	int intr;
-#endif
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -796,39 +1212,17 @@ dpaa_qdma_dequeue(void *dev_private,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, NULL);
+				nb_cpls, last_idx, NULL);
 	}
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
-			intr = qdma_readl(status + FSL_QDMA_DEDR);
-			*has_error = true;
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
 			fsl_queue->stats.errors++;
 		}
 	}
-#endif
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
-	}
 
 	return ret;
 }
@@ -868,7 +1262,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	return fsl_queue->n_cq - fsl_queue->pending;
+	return fsl_queue->pending_max - fsl_queue->pending_num;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
@@ -891,6 +1285,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
+		s_data_validation = 1;
+
+	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
+		s_hw_err_check = 1;
+
+	if (getenv("DPAA_QDMA_SG_DISABLE"))
+		s_sg_disable = 1;
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -981,6 +1384,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index da2dbf36c9..9d8d9e865d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -83,29 +83,15 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
-#define FSL_QDMA_CMD_PF			BIT(17)
-
-#define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
 #define FSL_QDMA_CMD_SSS_STRIDE		128
 #define FSL_QDMA_CMD_SSS_DISTANCE	128
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
 #define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
@@ -132,64 +118,160 @@
 	(((fsl_qdma_engine)->block_offset) * (x))
 
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	uint32_t status; /* ser, status */
-	uint32_t cfg;	/* format, offset */
-	union {
-		struct {
-			uint32_t addr_lo; /* low 32-bits of 40-bit address */
-			uint8_t addr_hi; /* high 8-bits of 40-bit address */
-			uint8_t __reserved1[2];
-			uint8_t queue:3;
-			uint8_t rsv:3;
-			uint8_t dd:2;
-		};
-		uint64_t data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	uint32_t rev3;
-	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	uint32_t rev5;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	uint32_t rev1;
-	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	uint32_t rev3;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
 
 struct fsl_qdma_df {
 	struct fsl_qdma_sdf sdf;
 	struct fsl_qdma_ddf ddf;
 };
 
+#define FSL_QDMA_SG_MAX_ENTRY RTE_DPAAX_QDMA_JOB_SUBMIT_MAX
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
 struct fsl_qdma_cmpd_ft {
-	struct fsl_qdma_format desc_buf;
-	struct fsl_qdma_format desc_sbuf;
-	struct fsl_qdma_format desc_dbuf;
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+} __rte_packed;
+
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format *cmd_desc;
 	int used;
 	struct fsl_qdma_cmpd_ft **ft;
 	uint16_t ci;
-	uint16_t complete;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
 	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
 	struct rte_dma_stats stats;
-	uint8_t pending;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint8_t pending_num;
+	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
 	void *engine;
@@ -200,7 +282,7 @@ struct fsl_qdma_status_queue {
 	uint16_t complete;
 	uint8_t block_id;
 	void *block_vir;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	struct rte_dma_stats stats;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 23/30] dma/dpaa: block dequeue
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (21 preceding siblings ...)
  2024-07-22 11:58   ` [v2 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
                     ` (7 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform a block-level dequeue to identify which queue of the block has completed jobs.
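
A minimal sketch of the idea follows. The types and names here
(blk_status_sketch, cmd_q_sketch, block_dequeue_sketch) are simplified
stand-ins rather than the driver's real structures, and the hardware
status ring is modelled as a plain array:

#include <stdint.h>

/* Illustrative types only: one status ring per block, one completion
 * counter per command queue.
 */
struct cmd_q_sketch {
	uint64_t completed;
};

struct blk_status_sketch {
	uint16_t head;    /* next status entry to consume */
	uint16_t n_cq;    /* status ring size */
	uint16_t valid;   /* entries currently reported by HW */
	uint8_t qid[64];  /* owning command queue of each entry */
};

/* Walk the block's status ring once and credit each owning command
 * queue; returns the number of completions consumed.
 */
static uint16_t
block_dequeue_sketch(struct blk_status_sketch *st,
	struct cmd_q_sketch *queues)
{
	uint16_t count = 0;

	while (st->valid) {
		uint8_t qid = st->qid[st->head];

		queues[qid].completed++;
		st->head = (st->head + 1) % st->n_cq;
		st->valid--;
		count++;
	}

	return count;
}

In the driver the same walk is keyed by qdma_ccdf_get_queue() on the
block's status queue, so a single poll services every command queue of
the block.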

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 252 ++++++++++++++++-------------------
 1 file changed, 116 insertions(+), 136 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1ad60d1f2..de5ecc7d0b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -442,86 +442,6 @@ fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
 	}
 }
 
-static int
-fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
-{
-	uint32_t reg;
-	int count = 0, ret;
-	uint8_t *block = fsl_queue->block_vir;
-	uint16_t *dq_complete = NULL, drain_num = 0;
-	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
-
-	while (1) {
-		if (rte_ring_free_count(fsl_queue->complete_pool) <
-			(FSL_QDMA_SG_MAX_ENTRY * 2))
-			break;
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			break;
-
-		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		ret = rte_ring_dequeue(fsl_queue->complete_burst,
-			(void **)&dq_complete);
-		if (ret)
-			DPAA_QDMA_ERR("DQ desc number failed!\n");
-
-		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
-			(void **)desc, *dq_complete, NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
-
-		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
-			(void **)desc, (*dq_complete), NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		drain_num += *dq_complete;
-		fsl_queue->complete_start =
-			(fsl_queue->complete_start + (*dq_complete)) &
-			(fsl_queue->pending_max - 1);
-		fsl_queue->stats.completed++;
-
-		count++;
-	}
-
-	return drain_num;
-}
-
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls, uint16_t *last_idx,
-	enum rte_dma_status_code *status)
-{
-	int ret;
-	uint16_t dq_num = 0, i;
-	struct fsl_qdma_desc *desc_complete[nb_cpls];
-
-	ret = fsl_qdma_queue_drain(fsl_queue);
-	if (ret < 0) {
-		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			ret);
-	}
-
-	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
-		(void **)desc_complete, nb_cpls, NULL);
-	for (i = 0; i < dq_num; i++)
-		last_idx[i] = desc_complete[i]->flag;
-
-	if (status) {
-		for (i = 0; i < dq_num; i++)
-			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
-	}
-
-	return dq_num;
-}
-
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -682,13 +602,90 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete = NULL;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
+
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
-	int overflow = 0, drain;
-	uint32_t reg, check_num, drain_num;
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
 	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
@@ -711,11 +708,12 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	drain_num = 0;
 
 drain_again:
-	drain = fsl_qdma_queue_drain(fsl_queue);
-	if (drain <= 0) {
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
 		drain_num++;
 		if (drain_num > 100) {
-			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
 			return -ENOSPC;
@@ -724,7 +722,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	}
 	check_num++;
 	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
@@ -1059,39 +1057,6 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
-static uint16_t
-dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
-	uint8_t block_id)
-{
-	struct fsl_qdma_status_queue *stat_queue;
-	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_comp_cmd_desc *cq;
-	uint16_t start, count = 0;
-	uint8_t qid;
-	int ret;
-
-	stat_queue = &fsl_qdma->stat_queues[block_id];
-	cq = stat_queue->cq;
-	start = stat_queue->complete;
-
-	do {
-		ret = qdma_ccdf_get_queue(&cq[start], &qid);
-		if (ret == true) {
-			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
-			start++;
-			if (unlikely(start == stat_queue->n_cq))
-				start = 0;
-			count++;
-		} else {
-			break;
-		}
-	} while (1);
-	stat_queue->complete = start;
-
-	return count;
-}
-
 static int
 dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
 {
@@ -1164,22 +1129,32 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
 		return 0;
 	}
 
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, st);
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1187,7 +1162,7 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			fsl_queue->stats.errors++;
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static uint16_t
@@ -1196,9 +1171,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -1207,13 +1184,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 
 	*has_error = false;
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, NULL);
-	}
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1224,7 +1204,7 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 24/30] dma/dpaa: improve congestion handling
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (22 preceding siblings ...)
  2024-07-22 11:58   ` [v2 23/30] dma/dpaa: block dequeue Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
                     ` (6 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Congestion occurs frequently on low-speed devices (PCIe).
Drain the command queue when congestion occurs so that DMA keeps making progress.
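
The retry pattern boils down to roughly the sketch below. ring_full()
and drain_block() are placeholder stubs standing in for the
BCQSR/watermark check and dpaa_qdma_block_dequeue(), and the retry
limits are only indicative:

#include <errno.h>
#include <stdbool.h>

/* Stubs standing in for the HW occupancy read and for draining
 * completed jobs from the block.
 */
static bool ring_full(void) { return false; }
static unsigned int drain_block(void) { return 0; }

/* When the command queue looks congested, drain completions from the
 * block and re-check, with bounded retries before giving up.
 */
static int
wait_for_room(void)
{
	unsigned int empty_drains = 0, checks = 0;

	while (ring_full()) {
		if (drain_block() == 0) {
			if (++empty_drains > 100)
				return -ENOSPC; /* nothing ever completes */
			continue;
		}
		if (++checks > 10)
			return -ENOSPC; /* drained, but still congested */
	}

	return 0;
}

Returning -ENOSPC after a bounded number of attempts lets the caller
back off instead of spinning forever on a stalled queue.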

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 157 +++++++++++++++++++----------------
 1 file changed, 85 insertions(+), 72 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index de5ecc7d0b..eaa5f81f6d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -535,73 +535,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
-{
-	uint16_t i, num = fsl_queue->pending_num, idx, start;
-	int ret;
-
-	num = is_burst ? fsl_queue->pending_num : 1;
-
-	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
-	ret = rte_ring_enqueue(fsl_queue->complete_burst,
-			&fsl_queue->desc_in_hw[fsl_queue->ci]);
-	if (ret) {
-		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
-			__func__);
-		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
-			__func__, fsl_queue->stats.submitted,
-			fsl_queue->stats.completed);
-		return ret;
-	}
-	start = fsl_queue->pending_start;
-	for (i = 0; i < num; i++) {
-		idx = (start + i) & (fsl_queue->pending_max - 1);
-		ret = rte_ring_enqueue(fsl_queue->complete_desc,
-				&fsl_queue->pending_desc[idx]);
-		if (ret) {
-			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int
-fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
-	dma_addr_t dst, dma_addr_t src, size_t len)
-{
-	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
-	struct fsl_qdma_cmpd_ft *ft;
-	int ret;
-
-	ft = fsl_queue->ft[fsl_queue->ci];
-	csgf_src = &ft->desc_sbuf;
-	csgf_dest = &ft->desc_dbuf;
-	qdma_desc_sge_addr_set64(csgf_src, src);
-	csgf_src->length = len;
-	csgf_src->extion = 0;
-	qdma_desc_sge_addr_set64(csgf_dest, dst);
-	csgf_dest->length = len;
-	csgf_dest->extion = 0;
-	/* This entry is the last entry. */
-	csgf_dest->final = 1;
-
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
-	if (ret)
-		return ret;
-	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
-
-	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-	fsl_queue->stats.submitted++;
-
-	return 0;
-}
-
 static uint16_t
 dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	uint8_t block_id)
@@ -633,7 +566,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		ret = qdma_ccdf_get_queue(&cq[start], &qid);
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
 
 			ret = rte_ring_dequeue(cmd_queue->complete_burst,
 				(void **)&dq_complete);
@@ -677,6 +609,87 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	int ret, dq_cnt;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
+{
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	int ret;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
@@ -702,7 +715,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	if (likely(!overflow))
 		return 0;
 
-	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
 		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
 	drain_num = 0;
@@ -712,7 +725,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		fsl_queue->block_id);
 	if (!blk_drain) {
 		drain_num++;
-		if (drain_num > 100) {
+		if (drain_num > 1000) {
 			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
@@ -721,8 +734,8 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		goto drain_again;
 	}
 	check_num++;
-	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+	if (check_num > 1000) {
+		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 25/30] dma/dpaa: disable SG descriptor as default
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (23 preceding siblings ...)
  2024-07-22 11:58   ` [v2 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
                     ` (5 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Use per-descriptor burst operation instead of SG copy by default until the SG issue is fixed.
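
For reference, a hedged sketch of the default-off toggle as it stands
at this point in the series (the names mirror the patch; only the
default value changes here, so the environment check is effectively a
no-op until a later patch reworks it):

#include <stdlib.h>

/* SG descriptors default to off; burst copies fall back to per-job
 * single descriptors.
 */
static int s_sg_disable = 1;

static void
parse_sg_env(void)
{
	if (getenv("DPAA_QDMA_SG_DISABLE"))
		s_sg_disable = 1;
}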

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index eaa5f81f6d..8492d0de5b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,7 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable;
+static int s_sg_disable = 1;
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -335,7 +335,6 @@ fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
 	rte_free(queue->cq);
 }
 
-
 static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 26/30] dma/dpaa: improve ERRATA workaround solution
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (24 preceding siblings ...)
  2024-07-22 11:58   ` [v2 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
                     ` (4 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Fix the ERRATA 050757/050265 workaround, which is not effective
in burst mode.

The SDF/DDF is referenced by the first entry of the compound frame table;
move the DF into the compound frame table descriptor so it covers both
single copy and SG/burst copy.

Also fix the SG issue caused by memset clearing the physical address of
the SGEs in the compound frame table.
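
As a rough illustration, the source-descriptor programming now lives
next to the frame table. The struct layout below is a simplified
stand-in that only carries the fields touched here, and
ERR050757_SS_LEN mirrors FSL_QDMA_CMD_SS_ERR050757_LEN:

#include <stdint.h>

#define ERR050757_SS_LEN 128 /* stride/distance used by the workaround */

/* Simplified source descriptor: only the fields relevant here. */
struct sdf_sketch {
	unsigned int srttype:4;
	unsigned int prefetch:1;
	unsigned int ssen:1;
	unsigned int sss:12;
	unsigned int ssd:12;
};

/* Simplified compound frame table embedding its own descriptor, so the
 * single-copy and SG/burst paths share one programming helper.
 */
struct ft_sketch {
	struct sdf_sketch sdf;
};

static void
program_err050757(struct ft_sketch *ft, uint32_t total_len)
{
	struct sdf_sketch *sdf = &ft->sdf;

	sdf->srttype = 0x4; /* read transaction type (FSL_QDMA_CMD_RWTTYPE) */
	sdf->prefetch = 1;  /* ERR050265 workaround */
	if (total_len > ERR050757_SS_LEN) {
		/* split long reads into bounded stride/distance chunks */
		sdf->ssen = 1;
		sdf->sss = ERR050757_SS_LEN;
		sdf->ssd = ERR050757_SS_LEN;
	} else {
		sdf->ssen = 0;
		sdf->sss = 0;
		sdf->ssd = 0;
	}
}

Keeping the DF inside the frame table means the same helper applies to
both the single-copy and the SG enqueue paths.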

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 215 +++++++++++++++++------------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 +-
 2 files changed, 107 insertions(+), 115 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8492d0de5b..5d91ad2d70 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,10 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable = 1;
+static int s_sg_enable = 1;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+static int s_pci_read = 1;
+#endif
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -126,10 +129,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
 	struct fsl_qdma_cmpd_ft *ft;
-	struct fsl_qdma_df *df;
 
 	for (i = 0; i < queue->n_cq; i++) {
-		dma_addr_t phy_ft = 0, phy_df = 0;
+		dma_addr_t phy_ft = 0;
 
 		queue->ft[i] = dma_pool_alloc(NULL,
 			sizeof(struct fsl_qdma_cmpd_ft),
@@ -156,25 +158,14 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
 		queue->ft[i]->phy_dsge = phy_ft +
 			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
-
-		queue->df[i] = dma_pool_alloc(NULL,
-			sizeof(struct fsl_qdma_df),
-			RTE_CACHE_LINE_SIZE, &phy_df);
-		if (!queue->df[i]) {
-			rte_free(queue->ft[i]);
-			queue->ft[i] = NULL;
-			goto fail;
-		}
-
-		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
-		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
 
 		ft = queue->ft[i];
-		df = queue->df[i];
-		sdf = &df->sdf;
-		ddf = &df->ddf;
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
 		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
 
@@ -198,10 +189,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return 0;
 
 fail:
-	for (j = 0; j < i; j++) {
+	for (j = 0; j < i; j++)
 		rte_free(queue->ft[j]);
-		rte_free(queue->df[j]);
-	}
 
 	return -ENOMEM;
 }
@@ -247,23 +236,12 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	sprintf(nm, "Descriptor Buf_%d_%d",
-		block_id, queue_id);
-	cmd_queue->df = rte_zmalloc(nm,
-			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!cmd_queue->df) {
-		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->ft);
-		rte_free(cmd_queue->cq);
-		return -ENOMEM;
-	}
 	sprintf(nm, "Pending_desc_%d_%d",
 		block_id, queue_id);
 	cmd_queue->pending_desc = rte_zmalloc(nm,
 		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
 	if (!cmd_queue->pending_desc) {
 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->df);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
@@ -278,7 +256,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-desc_ring_%d_%d",
@@ -292,7 +269,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-pool-desc_ring_%d_%d",
@@ -307,7 +283,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 
@@ -320,7 +295,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 static void
 fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue->pending_desc);
@@ -664,8 +638,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ft = fsl_queue->ft[fsl_queue->ci];
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -745,7 +741,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
 	uint8_t *block = fsl_queue->block_vir, i;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
@@ -758,74 +754,10 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_sdf *sdf;
 #endif
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
-	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->prefetch = 1;
-#endif
-#endif
-
-	if (num == 1) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-		if (fsl_queue->pending_desc[start].len >
-			FSL_QDMA_CMD_SSS_DISTANCE) {
-			sdf->ssen = 1;
-			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-		} else {
-			sdf->sss = 0;
-			sdf->ssd = 0;
-		}
-#endif
-		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-			fsl_queue->pending_desc[start].dst,
-			fsl_queue->pending_desc[start].src,
-			fsl_queue->pending_desc[start].len);
-		if (!ret) {
-			fsl_queue->pending_start =
-				(start + 1) & (fsl_queue->pending_max - 1);
-			fsl_queue->pending_num = 0;
-		}
-		return ret;
-	} else if (s_sg_disable) {
-		while (fsl_queue->pending_num > 0) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-			if (fsl_queue->pending_desc[start].len >
-				FSL_QDMA_CMD_SSS_DISTANCE) {
-				sdf->ssen = 1;
-				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-			} else {
-				sdf->sss = 0;
-				sdf->ssd = 0;
-			}
-#endif
-			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-				fsl_queue->pending_desc[start].dst,
-				fsl_queue->pending_desc[start].src,
-				fsl_queue->pending_desc[start].len);
-			if (!ret) {
-				start = (start + 1) &
-					(fsl_queue->pending_max - 1);
-				fsl_queue->pending_start = start;
-				fsl_queue->pending_num--;
-			} else {
-				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
-					ret);
-				return -EIO;
-			}
-		}
 
-		return 0;
-	}
 	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
 	csgf_src->extion = 1;
 	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
@@ -849,13 +781,21 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->ssen = 1;
-		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-	} else {
-		sdf->sss = 0;
-		sdf->ssd = 0;
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
 	}
 #endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
@@ -875,6 +815,51 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint16_t start = fsl_queue->pending_start;
+	int ret;
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (!s_sg_enable) {
+		while (fsl_queue->pending_num > 0) {
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
+
+		return 0;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
+
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	__rte_unused uint32_t info_sz)
@@ -1276,6 +1261,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int regs_size;
 	int ret;
 	uint32_t i, j, k;
+	char *penv;
 
 	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
 		s_data_validation = 1;
@@ -1283,8 +1269,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
 		s_hw_err_check = 1;
 
-	if (getenv("DPAA_QDMA_SG_DISABLE"))
-		s_sg_disable = 1;
+	penv = getenv("DPAA_QDMA_SG_ENABLE");
+	if (penv)
+		s_sg_enable = atoi(penv);
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	penv = getenv("DPAA_QDMA_PCI_READ");
+	if (penv)
+		s_pci_read = atoi(penv);
+#endif
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 9d8d9e865d..753ac6973d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -88,9 +88,7 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CFG_SSS_OFFSET		12
-#define FSL_QDMA_CMD_SSS_STRIDE		128
-#define FSL_QDMA_CMD_SSS_DISTANCE	128
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
@@ -192,8 +190,10 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t cache_align[2];
 	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
 	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
 	uint64_t phy_ssge;
 	uint64_t phy_dsge;
+	uint64_t phy_df;
 } __rte_packed;
 
 #define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
@@ -273,7 +273,6 @@ struct fsl_qdma_queue {
 	uint8_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
-	struct fsl_qdma_df **df;
 	void *engine;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 27/30] dma/dpaa: improve silent mode support
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (25 preceding siblings ...)
  2024-07-22 11:58   ` [v2 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
                     ` (3 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Do not save the enqueue context in silent mode; check the HW status
only to identify whether the queue is full.
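
A minimal sketch of the changed behaviour, assuming a hw_queue_full()
stub in place of the BCQSR queue-full read and a simple retry bound:

#include <errno.h>
#include <stdbool.h>

static bool silent_mode;

/* Stub standing in for reading the queue-full bit from BCQSR. */
static bool hw_queue_full(void) { return false; }

/* In silent mode completions are not reported back to software, so no
 * enqueue context is recorded; only wait until the HW queue has room.
 */
static int
silent_reserve_slot(void)
{
	unsigned int spins = 0;

	if (!silent_mode)
		return 0; /* the normal path records the context elsewhere */

	while (hw_queue_full()) {
		if (++spins > 1000)
			return -ENOSPC; /* give up after a bounded wait */
	}

	return 0;
}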

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 5d91ad2d70..8f5b6c6ea5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -586,9 +586,13 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	int is_burst)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
+
 	num = is_burst ? fsl_queue->pending_num : 1;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
@@ -697,7 +701,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (unlikely(s_hw_err_check)) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -707,8 +711,14 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
 	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num < 1000)
+			goto overflow_check;
+		return -ENOSPC;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 28/30] dma/dpaa: support multiple SG copies
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (26 preceding siblings ...)
  2024-07-22 11:58   ` [v2 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
                     ` (2 subsequent siblings)
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Split a burst copy into multiple SG copies if the burst size exceeds
the maximum number of SG entries.
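
The splitting loop reduces to something like the sketch below;
submit_sg() and SG_MAX are placeholders for the real SG enqueue helper
and FSL_QDMA_SG_MAX_ENTRY, and the pending ring size is assumed to be a
power of two:

#include <stdint.h>

#define SG_MAX 64 /* stand-in for FSL_QDMA_SG_MAX_ENTRY */

/* Stub: submit one SG descriptor covering 'num' pending jobs. */
static int
submit_sg(uint16_t start, uint16_t num)
{
	(void)start;
	(void)num;
	return 0;
}

/* Consume all pending jobs, at most SG_MAX of them per SG descriptor. */
static int
flush_pending(uint16_t start, uint16_t pending, uint16_t pending_mask)
{
	while (pending > 0) {
		uint16_t num = pending > SG_MAX ? SG_MAX : pending;
		int ret = submit_sg(start, num);

		if (ret)
			return ret;
		start = (start + num) & pending_mask;
		pending -= num;
	}

	return 0;
}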

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 180 +++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |   2 +-
 2 files changed, 98 insertions(+), 84 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8f5b6c6ea5..383142fc75 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -584,17 +584,15 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 
 static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
+	uint16_t num)
 {
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
 	if (fsl_qdma->is_silent)
 		return 0;
 
-	num = is_burst ? fsl_queue->pending_num : 1;
-
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
 	ret = rte_ring_enqueue(fsl_queue->complete_burst,
@@ -634,6 +632,69 @@ fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow)) {
+		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
+	}
+	goto overflow_check;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	dma_addr_t dst, dma_addr_t src, size_t len)
@@ -646,6 +707,10 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
@@ -677,7 +742,7 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	/* This entry is the last entry. */
 	csgf_dest->final = 1;
 
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
 	if (ret)
 		return ret;
 	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
@@ -689,81 +754,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
-{
-	int overflow = 0;
-	uint32_t reg;
-	uint16_t blk_drain, check_num, drain_num;
-	uint8_t *block = fsl_queue->block_vir;
-	const struct rte_dma_stats *st = &fsl_queue->stats;
-	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-
-	check_num = 0;
-overflow_check:
-	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
-		reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
-			1 : 0;
-	} else {
-		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
-			QDMA_QUEUE_CR_WM) ? 1 : 0;
-	}
-
-	if (likely(!overflow)) {
-		return 0;
-	} else if (fsl_qdma->is_silent) {
-		check_num++;
-		if (check_num < 1000)
-			goto overflow_check;
-		return -ENOSPC;
-	}
-
-	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
-		fsl_queue->block_id, fsl_queue->queue_id,
-		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
-	drain_num = 0;
-
-drain_again:
-	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
-		fsl_queue->block_id);
-	if (!blk_drain) {
-		drain_num++;
-		if (drain_num > 1000) {
-			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
-				fsl_queue->block_id, fsl_queue->queue_id,
-				st->submitted - st->completed);
-			return -ENOSPC;
-		}
-		goto drain_again;
-	}
-	check_num++;
-	if (check_num > 1000) {
-		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			st->submitted - st->completed);
-		return -ENOSPC;
-	}
-	goto overflow_check;
-
-	return -ENOSPC;
-}
-
 static int
 fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-	uint8_t *block = fsl_queue->block_vir, i;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-	uint32_t total_len = 0;
-	uint8_t num = fsl_queue->pending_num;
-	uint16_t start = fsl_queue->pending_start, idx;
+	uint32_t total_len;
+	uint16_t start, idx, num, i;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
@@ -808,7 +822,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		}
 	}
 #endif
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
 
@@ -820,7 +834,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 
 	fsl_queue->pending_start =
 		(start + num) & (fsl_queue->pending_max - 1);
-	fsl_queue->pending_num = 0;
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
@@ -831,10 +847,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	if (fsl_queue->pending_num == 1) {
 		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
 			fsl_queue->pending_desc[start].dst,
@@ -871,17 +883,19 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	__rte_unused uint32_t info_sz)
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
 	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
 	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
 	return 0;
 }
@@ -985,9 +999,9 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint16_t idx;
 	int ret;
 
-	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
-		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
-			vchan);
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
 		return -ENOSPC;
 	}
 	idx = (start + pending) & (fsl_queue->pending_max - 1);
@@ -1253,7 +1267,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 753ac6973d..146151ab8c 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -270,7 +270,7 @@ struct fsl_qdma_queue {
 	struct fsl_qdma_desc *pending_desc;
 	uint16_t pending_max;
 	uint16_t pending_start;
-	uint8_t pending_num;
+	uint16_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 29/30] dma/dpaa: support max SG entry size
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (27 preceding siblings ...)
  2024-07-22 11:58   ` [v2 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 11:58   ` [v2 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

SG transactions do not help performance for large transaction sizes.
In an SG copy, start a single transaction for any entry whose size is
greater than the max SG entry size.

The default max SG entry size is 2000 bytes, based on mem-to-mem
experiment data; users can tune it to match their own experiments:
export DPAA_QDMA_SG_MAX_ENTRY_SIZE=xxx
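
For illustration only (dev_id, vchan and the IOVAs below are
placeholders, not part of this patch): jobs queued on a dpaa vchan
before a submit may be combined by the driver into one HW SG
transaction; with this patch, a job longer than
DPAA_QDMA_SG_MAX_ENTRY_SIZE (2000 bytes by default) is issued as a
plain single copy instead.

	rte_dma_copy(dev_id, vchan, src0, dst0, 512, 0);
	rte_dma_copy(dev_id, vchan, src1, dst1, 512, 0);
	/* large job, started as a single transaction by the driver */
	rte_dma_copy(dev_id, vchan, src2, dst2, 64 * 1024, 0);
	rte_dma_submit(dev_id, vchan);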

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 41 ++++++++++++++++++++++++++++++++----
 1 file changed, 37 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 383142fc75..e3f2777b40 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -12,6 +12,8 @@
 static int s_data_validation;
 static int s_hw_err_check;
 static int s_sg_enable = 1;
+static uint32_t s_sg_max_entry_sz = 2000;
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 static int s_pci_read = 1;
 #endif
@@ -761,7 +763,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	uint32_t total_len;
-	uint16_t start, idx, num, i;
+	uint16_t start, idx, num, i, next_idx;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
@@ -770,13 +772,31 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 eq_sg:
 	total_len = 0;
 	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
+
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
 		num = FSL_QDMA_SG_MAX_ENTRY;
 	else
 		num = fsl_queue->pending_num;
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
@@ -799,7 +819,16 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
 		ft->desc_dsge[i].final = 0;
 		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
 	}
+
 	ft->desc_ssge[num - 1].final = 1;
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
@@ -1297,6 +1326,10 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (penv)
 		s_sg_enable = atoi(penv);
 
+	penv = getenv("DPAA_QDMA_SG_MAX_ENTRY_SIZE");
+	if (penv)
+		s_sg_max_entry_sz = atoi(penv);
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	penv = getenv("DPAA_QDMA_PCI_READ");
 	if (penv)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v2 30/30] bus/dpaa: add port bmi stats
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (28 preceding siblings ...)
  2024-07-22 11:58   ` [v2 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
@ 2024-07-22 11:58   ` Gagandeep Singh
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  30 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 11:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended statistics.
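
A minimal sketch (not part of this patch; port 0 and the array size are
placeholders, with rte_ethdev.h, stdio.h and inttypes.h assumed
included) of reading the new BMI counters through the standard xstats
API:

	struct rte_eth_xstat_name names[128];
	struct rte_eth_xstat xstats[128];
	int i, n;

	n = rte_eth_xstats_get_names(0, names, 128);
	if (n > 0 && n <= 128 && rte_eth_xstats_get(0, xstats, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
				names[xstats[i].id].name, xstats[i].value);
	}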

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index 3a6dd555a7..60681068ea 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 060b8c678f..4d9a4c7e6d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discard_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_deallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values_copy);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b6c61b8b6b..261a5a3ca7 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 00/30] NXP DMA driver fixes and Enhancements
  2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                     ` (29 preceding siblings ...)
  2024-07-22 11:58   ` [v2 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
@ 2024-07-22 16:39   ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                       ` (29 more replies)
  30 siblings, 30 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev

V3 changes:
* fix 32-bit compilation issue

V2 changes:
* fix compilation issue on Ubuntu 22.04

Gagandeep Singh (6):
  dma/dpaa: support multi channels
  dma/dpaa: fix job enqueue
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: remove unwanted desc
  dma/dpaa: data path optimization

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (22):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: support multiple HW queues
  dma/dpaa2: adapt DMA driver API
  dma/dpaa2: multiple process support
  dma/dpaa2: add sanity check for SG entry
  dma/dpaa2: include DPAA2 specific header files
  dma/dpaa2: borrow flags of DMA operation to pass job context
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: dequeue status queue
  dma/dpaa: add Scatter Gather support
  dma/dpaa: block dequeue
  dma/dpaa: improve congestion handling
  dma/dpaa: disable SG descriptor as default
  dma/dpaa: improve ERRATA workaround solution
  dma/dpaa: improve silent mode support
  dma/dpaa: support multiple SG copies
  dma/dpaa: support max SG entry size

Vanshika Shukla (1):
  dma/dpaa: add burst capacity API

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    3 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1641 +++++++-----
 drivers/dma/dpaa/dpaa_qdma.h                  |  289 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2381 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   81 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 28 files changed, 2856 insertions(+), 2303 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 01/30] dma/dpaa2: configure route by port by PCIe port param
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
                       ` (28 subsequent siblings)
  29 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
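
A minimal usage sketch (not part of this patch; dev_id, vchan and the
field values are placeholders): the route-by-port information is now
taken from the generic vchan configuration instead of the removed
rte_dpaa2_qdma_vchan_rbp_enable() API.

	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 1,	/* rbp.dportid */
				.pfid = 0,	/* rbp.dpfid */
				.vfen = 1,	/* rbp.dvfa */
				.vfid = 2,	/* rbp.dvfid */
			},
		},
	};
	int ret = rte_dma_vchan_setup(dev_id, vchan, &conf);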

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 2c91ceec13..5954b552b5 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.dvfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index 713ed41f0c..eb012cfbfc 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 02/30] dma/dpaa2: support multiple HW queues
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 20:19       ` Stephen Hemminger
  2024-10-07 20:51       ` Stephen Hemminger
  2024-07-22 16:39     ` [v3 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
                       ` (27 subsequent siblings)
  29 siblings, 2 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Initialize and configure the DMA device queues according to the number
of HW queues reported by the MC bus.
Because multiple queues per device are now supported, the virtual
queue implementation is dropped.
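
Illustrative only (dev_id is a placeholder): with this change each HW
queue reported in dev_info.max_vchans can be set up as its own vchan
through the generic dmadev API.

	struct rte_dma_info info;
	struct rte_dma_conf dev_conf = { 0 };
	struct rte_dma_vchan_conf vconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 256,
	};
	uint16_t q;

	rte_dma_info_get(dev_id, &info);
	dev_conf.nb_vchans = info.max_vchans;
	rte_dma_configure(dev_id, &dev_conf);
	for (q = 0; q < dev_conf.nb_vchans; q++)
		rte_dma_vchan_setup(dev_id, q, &vconf);
	rte_dma_start(dev_id);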

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 312 +++++++++++++++------------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 140 insertions(+), 178 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5954b552b5..945ba71e4a 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -478,9 +478,9 @@ dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
 
 static inline uint16_t
 dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -512,9 +512,9 @@ dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
 
 static inline uint16_t
 dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
+	const struct qbman_fd *fd,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t *nb_jobs)
 {
 	struct qbman_fle *fle;
 	struct rte_dpaa2_qdma_job **ppjob = NULL;
@@ -548,12 +548,12 @@ dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
 /* Function to receive a QDMA job for a given device and queue*/
 static int
 dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -562,7 +562,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, pull_size;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -575,15 +575,17 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_jobs;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -697,12 +699,12 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 
 static int
 dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
+	uint16_t *vq_id,
+	struct rte_dpaa2_qdma_job **job,
+	uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
@@ -710,7 +712,7 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
+	uint16_t rx_fqid;
 	int ret, next_pull, num_pulled = 0;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
@@ -725,15 +727,15 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
 				rte_gettid());
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	rxq = &(dpdmai_dev->rx_queue[0]);
+	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
+	rx_fqid = rxq->fqid;
 
 	do {
 		dq_storage = rxq->q_storage->dq_storage[0];
@@ -810,7 +812,7 @@ dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
 			uint16_t nb_jobs)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
 	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
 	struct qbman_eq_desc eqdesc;
 	struct qbman_swp *swp;
@@ -931,8 +933,8 @@ dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -966,8 +968,8 @@ dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
 
 int
 rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_cpls)
 {
 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
 	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
@@ -978,14 +980,11 @@ rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
 }
 
 static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
+dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
+	struct rte_dpaa2_qdma_job **jobs,
+	uint16_t nb_jobs)
 {
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
+	int ret;
 
 	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
 		/** Make sure there are enough space to get jobs.*/
@@ -1002,42 +1001,12 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
 				qdma_vq->num_dequeues), nb_jobs);
 
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
-
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
-
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
-		}
+	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
+	if (ret < 0) {
+		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
+			qdma_vq->dpdmai_dev->dpdmai_id,
+			qdma_vq->vq_id, ret);
+		return ret;
 	}
 
 	qdma_vq->num_dequeues += ret;
@@ -1046,9 +1015,9 @@ dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
 
 static uint16_t
 dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
+	const uint16_t nb_cpls,
+	uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1056,7 +1025,7 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
 	int ret, i;
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 
 	for (i = 0; i < ret; i++)
 		st[i] = jobs[i]->status;
@@ -1071,8 +1040,8 @@ dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1082,7 +1051,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 
 	RTE_SET_USED(has_error);
 
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
+	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
 				jobs, nb_cpls);
 
 	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
@@ -1103,16 +1072,15 @@ rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 			     RTE_DMA_CAPA_MEM_TO_DEV |
@@ -1120,7 +1088,7 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
 
@@ -1129,12 +1097,13 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
 	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1142,9 +1111,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
+		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
+			dev->data->dev_name);
+		return -EBUSY;
 	}
 
 	/* Allocate Virtual Queues */
@@ -1156,6 +1125,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
 		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++)
+		qdma_dev->vqs[i].vq_id = i;
+
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 
 	return 0;
@@ -1257,13 +1229,12 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
 	int sg_enable = 0, ret;
@@ -1301,20 +1272,6 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
 	}
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
-
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
@@ -1410,8 +1367,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case QDMA device is not in stopped state, return -EBUSY */
 	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
+		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
+			dev->data->dev_name);
 		return -EBUSY;
 	}
 
@@ -1424,10 +1381,6 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	/* Reset and free virtual queues */
-	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
-	}
 	rte_free(qdma_dev->vqs);
 	qdma_dev->vqs = NULL;
 
@@ -1504,29 +1457,35 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	int ret;
+	struct dpaa2_queue *rxq;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
 	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+		}
 	}
 
 	/* Close the device at underlying layer*/
 	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
+			dpdmai_dev->dpdmai_id);
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -1538,80 +1497,87 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	struct dpdmai_rx_queue_attr rx_attr;
 	struct dpdmai_tx_queue_attr tx_attr;
 	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
 	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
+	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
 	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		return ret;
 	}
 
 	/* Get DPDMAI attributes */
 	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
 		goto init_err;
 	}
 	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
+	/* Set up Rx Queues */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq = &dpdmai_dev->rx_queue[i];
+		rxq->q_storage = rte_malloc("dq_storage",
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto init_err;
+		}
 
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
+				dev->data->dev_name, i, ret);
+			goto init_err;
+		}
 	}
 
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dpdmai_dev->num_queues; i++) {
+		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
+		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			goto init_err;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
 
 	/* Enable the device */
 	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..786dcb9308 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -18,7 +18,7 @@
 
 #define DPAA2_QDMA_MAX_SG_NB 64
 
-#define DPAA2_DPDMAI_MAX_QUEUES	1
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
 /** FLE single job pool size: job pointer(uint64_t) +
  * 3 Frame list + 2 source/destination descriptor.
@@ -245,8 +245,6 @@ typedef int (qdma_enqueue_multijob_t)(
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
@@ -255,8 +253,6 @@ struct qdma_virt_queue {
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 03/30] dma/dpaa2: adapt DMA driver API
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 04/30] dma/dpaa2: multiple process support Gagandeep Singh
                       ` (26 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

1) Support DMA single copy and SG copy.
2) Support silent mode.

Add an index combined with the length field.
In silent mode, this index tells the DMA driver which inner descriptor
should be used.
In non-silent mode, this index tells the user which descriptor has
completed.
In addition, because dpaa2 qdma cannot preserve ordering,
"rte_dma_completed_t" returns multiple indexes instead of only the
last index.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 1667 +++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  126 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  119 +-
 drivers/dma/dpaa2/version.map          |   13 -
 4 files changed, 799 insertions(+), 1126 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 945ba71e4a..15d3776603 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,218 +16,345 @@
 
 #define DPAA2_QDMA_PREFETCH "prefetch"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
 static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	fd->simple_pci.len_sl = len;
+	if ((ring->tail + nb) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem, nb * sizeof(uint16_t));
+		ring->tail += nb;
+	} else {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem,
+			(DPAA2_QDMA_MAX_DESC - ring->tail) *
+			sizeof(uint16_t));
+		rte_memcpy(&ring->cntx_idx_ring[0],
+			&elem[DPAA2_QDMA_MAX_DESC - ring->tail],
+			(nb - DPAA2_QDMA_MAX_DESC + ring->tail) *
+			sizeof(uint16_t));
+		ring->tail = (ring->tail + nb) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+	if (free_space)
+		*free_space = ring->free_space;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	return nb;
+}
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
+{
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!ret)
+		return 0;
 
-	return 0;
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
 }
 
-static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
 
-	return 0;
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
+
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
 }
 
 static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
-
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 {
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
+{
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i = 0, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = len;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = len;
+
+	total_len += len;
+	sg_cntx->cntx_idx[i] = idx;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
 {
-	uint16_t i;
-	uint32_t total_len = 0;
-	uint64_t iova;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
+}
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i, idx;
+	uint32_t total_len = 0, len;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
+		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = len;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +362,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = len;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +372,10 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += len;
+		sg_cntx->cntx_idx[i] = idx;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,327 +386,432 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
 	}
 
-	return 0;
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
+	}
+
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint32_t cntx_idx, len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+
+	if (unlikely(nb_src != nb_dst))
+		return -ENOTSUP;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
+		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	uint16_t cntx_idx;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
+	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[cntx_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = cntx_idx;
+	}
 
-	*job = *ppjob;
-	(*job)->status = status;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	return (*job)->vq_id;
-}
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-	const struct qbman_fd *fd,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+		}
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
+		fle_post_populate(fle, src, dst, len);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, len,
+			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	return job[0]->vq_id;
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
+
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
 	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ?
-		dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -594,21 +820,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy\n");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -617,7 +842,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -631,7 +856,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -664,27 +889,40 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
+		} else {
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
+		}
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
 		dq_storage++;
-		num_rx += num_rx_ret;
 	} while (pending);
 
 	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
 			;
 		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 	}
 	/* issue a volatile dequeue command for next pull */
 	while (1) {
 		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)\n");
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
 			continue;
 		}
 		break;
@@ -694,387 +932,18 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-	uint16_t *vq_id,
-	struct rte_dpaa2_qdma_job **job,
-	uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq;
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid(%d)",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &dpdmai_dev->rx_queue[qdma_vq->vq_id];
-	rx_fqid = rxq->fqid;
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
-		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
+	if (has_error)
+		*has_error = false;
 
 	return num_rx;
 }
 
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d\n",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_virt_queue *qdma_vq,
-	struct rte_dpaa2_qdma_job **jobs,
-	uint16_t nb_jobs)
-{
-	int ret;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-	if (ret < 0) {
-		DPAA2_QDMA_ERR("Dequeue from DMA%d-q%d failed(%d)",
-			qdma_vq->dpdmai_dev->dpdmai_id,
-			qdma_vq->vq_id, ret);
-		return ret;
-	}
-
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-	const uint16_t nb_cpls,
-	uint16_t *last_idx,
-	enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-	uint16_t vchan, const uint16_t nb_cpls,
-	uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpaa2_qdma_dequeue_multi(qdma_vq, jobs, nb_cpls);
-}
-
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct rte_dma_info *dev_info,
@@ -1119,80 +988,22 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	/* Allocate Virtual Queues */
 	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
 	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
 	for (i = 0; i < dev_conf->nb_vchans; i++)
 		qdma_dev->vqs[i].vq_id = i;
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
 }
 
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
-
-	if (!devargs)
-		return 0;
-
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
-
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-	rte_kvargs_free(kvlist);
-
-	return 1;
-}
-
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
-
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
-}
-
 static int
 dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 	const struct rte_dma_vchan_conf *conf)
@@ -1236,8 +1047,8 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
+	char *env = NULL;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1247,85 +1058,70 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
+	/**Default enable FLE PRE POPULATE*/
+	env = getenv("DPAA2_QDMA_FLE_PRE_POPULATE");
+	if (env)
+		qdma_dev->vqs[vchan].fle_pre_populate = atoi(env);
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
 
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	env = getenv("DPAA2_QDMA_DESC_DEBUG");
+	if (env && atoi(env))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1374,9 +1170,12 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
@@ -1618,7 +1417,7 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1628,8 +1427,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1639,10 +1438,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 786dcb9308..ee34532408 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,7 +5,7 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +13,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
-
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,6 +163,39 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -221,27 +215,18 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
@@ -249,10 +234,11 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -262,18 +248,17 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/**Used for silent enabled*/
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -284,6 +269,7 @@ struct qdma_device {
 	uint16_t num_vqs;
 	/** Device state - started or stopped */
 	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..729bff42bb 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,118 +7,19 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
 
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index eb012cfbfc..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_24 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 04/30] dma/dpaa2: multiple process support
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (2 preceding siblings ...)
  2024-07-22 16:39     ` [v3 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
                       ` (25 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Support multiple processes for dpaa2 dma.
1) Move the queue configuration procedure from the init function to
the device configuration function, which is called by the user.

2) Instances of dpaa2_dpdmai_dev and qdma_device are allocated by the
primary process and shared between processes.

3) The MC register is mapped per process.

4) The user is responsible for checking the number of configured vqs
before using the DMA device, to identify whether the device is already
occupied by another process (see the sketch after this list).
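
A minimal sketch of the check described in 4), assuming the device was
already probed by the primary process; the helper name and the dev_id
parameter are hypothetical and not part of this patch:

#include <errno.h>
#include <rte_dmadev.h>

static int
attach_dma_if_free(int16_t dev_id)
{
	struct rte_dma_info info;

	if (rte_dma_info_get(dev_id, &info) != 0)
		return -1;

	/* A non-zero vchan count means another process has already
	 * configured (occupies) this DMA device.
	 */
	if (info.nb_vchans != 0)
		return -EBUSY;

	return 0;
}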

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 405 ++++++++++++++++++++-------------
 drivers/dma/dpaa2/dpaa2_qdma.h |   6 +-
 2 files changed, 254 insertions(+), 157 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 15d3776603..44b82c139e 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #include <rte_eal.h>
@@ -19,6 +19,8 @@
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
 
+static struct fsl_mc_io s_proc_mc_reg;
+
 static inline int
 qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
 	const uint16_t *elem, uint16_t nb,
@@ -960,6 +962,9 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
@@ -969,25 +974,102 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 	const struct rte_dma_conf *dev_conf,
 	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
-			dev->data->dev_name);
-		return -EBUSY;
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
+
+		return -ENOTSUP;
+	}
+
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
+
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
+
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
 	}
 
 	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
+	qdma_dev->vqs = rte_zmalloc(NULL,
 		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
 		RTE_CACHE_LINE_SIZE);
 	if (!qdma_dev->vqs) {
@@ -995,13 +1077,50 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 			dev->data->dev_name, dev_conf->nb_vchans);
 		return -ENOMEM;
 	}
-	for (i = 0; i < dev_conf->nb_vchans; i++)
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
 	qdma_dev->is_silent = dev_conf->enable_silent;
 
 	return 0;
+
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
+
+	return ret;
 }
 
 static int
@@ -1130,11 +1249,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1143,30 +1268,33 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
-			dev->data->dev_name);
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
@@ -1180,8 +1308,31 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 		}
 	}
 
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
+	/* Free RXQ storages */
+	for (i = 0; i < qdma_dev->num_vqs; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+	}
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1190,18 +1341,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1256,56 +1397,97 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.",
 			dpdmai_dev->dpdmai_id);
-	}
-
-	/* Set up the DQRR storage for Rx */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		return 0;
 	}
 
 	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
 	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai(%d) close failed",
-			dpdmai_dev->dpdmai_id);
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
+
+		return ret;
+	}
+
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
 	}
 
 	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret, i;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
+	if (!dpaa2_coherent_no_alloc_cache) {
+		if (dpaa2_svr_family == SVR_LX2160A) {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+		} else {
+			dpaa2_coherent_no_alloc_cache =
+				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+			dpaa2_coherent_alloc_cache =
+				DPAA2_COHERENT_ALLOCATE_CACHE;
+		}
+	}
+
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
 	/* Open DPDMAI device */
 	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL,
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
 		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
@@ -1314,105 +1496,24 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 	}
 
 	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
 			dpdmai_dev->token, &attr);
 	if (ret) {
 		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
 			__func__, dpdmai_dev->dpdmai_id, ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queues */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				CMD_PRI_LOW,
-				dpdmai_dev->token,
-				i, 0, &rx_queue_cfg);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-
-		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq = &dpdmai_dev->rx_queue[i];
-		rxq->q_storage = rte_malloc("dq_storage",
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto init_err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
-				dev->data->dev_name, i, ret);
-			goto init_err;
-		}
-	}
-
-	/* Get Rx and Tx queues FQID's */
-	for (i = 0; i < dpdmai_dev->num_queues; i++) {
-		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &rx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
-
-		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				dpdmai_dev->token, i, 0, &tx_attr);
-		if (ret) {
-			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
-				dpdmai_dev->dpdmai_id, i, ret);
-			goto init_err;
-		}
-		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
-	}
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
-	if (!dpaa2_coherent_no_alloc_cache) {
-		if (dpaa2_svr_family == SVR_LX2160A) {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
-		} else {
-			dpaa2_coherent_no_alloc_cache =
-				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
-			dpaa2_coherent_alloc_cache =
-				DPAA2_COHERENT_ALLOCATE_CACHE;
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
 		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
-
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
-	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
-	}
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index ee34532408..743a43fa14 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -200,8 +200,6 @@ struct qdma_cntx_long {
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -267,8 +265,6 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
 	uint8_t is_silent;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 05/30] dma/dpaa2: add sanity check for SG entry
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (3 preceding siblings ...)
  2024-07-22 16:39     ` [v3 04/30] dma/dpaa2: multiple process support Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 20:21       ` Stephen Hemminger
  2024-07-22 16:39     ` [v3 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
                       ` (24 subsequent siblings)
  29 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Make sure the number of SG entries does not exceed the supported
maximum, and that the source and destination entry counts match.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 44b82c139e..7f6ebcb46b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -615,8 +615,17 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
-	if (unlikely(nb_src != nb_dst))
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
 		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 06/30] dma/dpaa2: include DPAA2 specific header files
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (4 preceding siblings ...)
  2024-07-22 16:39     ` [v3 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
                       ` (23 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include the dpaa2_hw_pvt.h and dpaa2_hw_dpio.h header files.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 743a43fa14..eb02bff08f 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -5,6 +5,9 @@
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
 #define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (5 preceding siblings ...)
  2024-07-22 16:39     ` [v3 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                       ` (22 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For copy_sg: pass the list of job indexes through the operation flags.
For copy: pass the single job index through the operation flags.
The usage sketch below illustrates both cases.
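
A minimal usage sketch of the flag encoding added by this patch,
assuming the vchan is already set up; dev_id, vchan, the buffers and
the index values are hypothetical placeholders:

#include <rte_dmadev.h>
#include <rte_pmd_dpaa2_qdma.h>

/* Single copy: the job index rides in the upper flag bits. */
static int
submit_copy_with_idx(int16_t dev_id, uint16_t vchan, rte_iova_t src,
	rte_iova_t dst, uint32_t len, uint16_t job_idx)
{
	uint64_t flags = RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx,
			RTE_DMA_OP_FLAG_SUBMIT);

	return rte_dma_copy(dev_id, vchan, src, dst, len, flags);
}

/* SG copy: the flags carry the address of the caller's index list.
 * The address must respect RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN, since
 * its low bits are reused for flags; the PMD copies the indexes into
 * its context at submit time.
 */
static int
submit_sg_with_idx(int16_t dev_id, uint16_t vchan,
	struct rte_dma_sge *src, struct rte_dma_sge *dst, uint16_t nb,
	uint16_t *idx_list)
{
	uint64_t flags = RTE_DPAA2_QDMA_SG_SUBMIT(idx_list,
			RTE_DMA_OP_FLAG_SUBMIT);

	return rte_dma_copy_sg(dev_id, vchan, src, dst, nb, nb, flags);
}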

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 92 ++++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  7 ++
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h | 15 ++++-
 3 files changed, 68 insertions(+), 46 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 7f6ebcb46b..7de4894b35 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -280,25 +280,22 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i = 0, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < (nb_sge - 1); i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
 
 		src_sge->ctrl.f = 0;
 		dst_sge->ctrl.f = 0;
@@ -309,19 +306,15 @@ sg_entry_post_populate(const struct rte_dma_sge *src,
 	if (unlikely(src[i].length != dst[i].length))
 		return -ENOTSUP;
 
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-	idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
-
 	src_sge->addr_lo = (uint32_t)src[i].addr;
 	src_sge->addr_hi = (src[i].addr >> 32);
-	src_sge->data_len.data_len_sl0 = len;
+	src_sge->data_len.data_len_sl0 = src[i].length;
 
 	dst_sge->addr_lo = (uint32_t)dst[i].addr;
 	dst_sge->addr_hi = (dst[i].addr >> 32);
-	dst_sge->data_len.data_len_sl0 = len;
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
 
-	total_len += len;
-	sg_cntx->cntx_idx[i] = idx;
+	total_len += dst[i].length;
 	sg_cntx->job_nb = nb_sge;
 
 	src_sge->ctrl.f = QDMA_SG_F;
@@ -343,20 +336,18 @@ sg_entry_populate(const struct rte_dma_sge *src,
 	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
 	uint16_t nb_sge)
 {
-	uint16_t i, idx;
-	uint32_t total_len = 0, len;
+	uint16_t i;
+	uint32_t total_len = 0;
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
 	for (i = 0; i < nb_sge; i++) {
 		if (unlikely(src[i].length != dst[i].length))
 			return -ENOTSUP;
-		len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(src[i].length);
-		idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[i].length);
 
 		src_sge->addr_lo = (uint32_t)src[i].addr;
 		src_sge->addr_hi = (src[i].addr >> 32);
-		src_sge->data_len.data_len_sl0 = len;
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -366,7 +357,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
-		dst_sge->data_len.data_len_sl0 = len;
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -374,8 +365,7 @@ sg_entry_populate(const struct rte_dma_sge *src,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += len;
-		sg_cntx->cntx_idx[i] = idx;
+		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
@@ -606,14 +596,15 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	int ret = 0, expected;
-	uint32_t cntx_idx, len;
+	int ret = 0, expected, i;
+	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_sg *cntx_sg = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
 
 	if (unlikely(nb_src != nb_dst)) {
 		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
@@ -630,14 +621,16 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	memset(fd, 0, sizeof(struct qbman_fd));
 
 	if (qdma_dev->is_silent) {
-		cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(src[0].length);
-		cntx_sg = qdma_vq->cntx_sg[cntx_idx];
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_sg);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -656,8 +649,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
 	if (qdma_vq->fle_pre_populate) {
-		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length))
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
 			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
 		len = sg_entry_post_populate(src, dst,
 			cntx_sg, nb_src);
@@ -683,6 +681,8 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -705,28 +705,23 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
-	uint16_t cntx_idx;
-	uint32_t len;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long;
+	struct qdma_cntx_long *cntx_long = NULL;
 	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	cntx_idx = RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length);
-	len = RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length);
-
 	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[cntx_idx];
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
 			(void **)&cntx_long);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = cntx_idx;
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -749,16 +744,20 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 			fle_sdd_pre_populate(&cntx_long->fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
 		}
 
-		fle_post_populate(fle, src, dst, len);
+		fle_post_populate(fle, src, dst, length);
 	} else {
 		sdd = cntx_long->fle_sdd.sdd;
 		sdd_iova = cntx_iova +
 			offsetof(struct qdma_cntx_long, fle_sdd) +
 			offsetof(struct qdma_cntx_fle_sdd, sdd);
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
-			src, dst, len,
+			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
@@ -766,6 +765,8 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
 	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
@@ -963,14 +964,17 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index eb02bff08f..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -199,6 +199,12 @@ struct qdma_cntx_long {
 	uint16_t rsv[3];
 } __rte_packed;
 
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -256,6 +262,7 @@ struct qdma_virt_queue {
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
 	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
 	int num_valid_jobs;
 
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 729bff42bb..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -20,6 +20,17 @@
 #define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
 	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 08/30] bus/fslmc: enhance the qbman dq storage logic
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (6 preceding siblings ...)
  2024-07-22 16:39     ` [v3 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 09/30] dma/dpaa2: add short FD support Gagandeep Singh
                       ` (21 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used across multiple cores; the single DQ
storage in the first union member is leaked whenever multiple storages
are allocated. Keeping a separate single DQ storage in the union makes
no sense, so remove it and reuse the first entry of the multi-storage
array for that case.
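
A minimal sketch of the per-lcore alloc/free pairing introduced by the
helpers in dpaa2_hw_pvt.h, assuming a driver-internal caller that owns
a struct dpaa2_queue; the wrapper name is hypothetical:

static int
setup_rx_storage(struct dpaa2_queue *rxq)
{
	/* Allocate one DQ storage per lcore. */
	int ret = dpaa2_queue_storage_alloc(rxq, RTE_MAX_LCORE);

	if (ret) {
		/* Release whatever was allocated before the failure. */
		dpaa2_queue_storage_free(rxq, RTE_MAX_LCORE);
		return ret;
	}

	return 0;
}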

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 43 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 103 insertions(+), 141 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 07256ed7ec..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed\n");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..1ce481c88d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c1f7181d55..7df208d004 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1891,7 +1891,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -1982,10 +1982,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2036,18 +2033,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 4754b9d6f8..c51e68f748 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 7de4894b35..53caccecd7 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -824,7 +824,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1032,13 +1032,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1094,24 +1088,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1122,11 +1101,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1324,11 +1299,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 449bbda7ca..ac524d2964 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 23f7c4132d..a0c057d183 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index aeee4ac289..5f4d0c68a4 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 09/30] dma/dpaa2: add short FD support
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (7 preceding siblings ...)
  2024-07-22 16:39     ` [v3 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                       ` (20 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Short FD can be used for the single-transfer scenario, where it shows
higher performance than the FLE format.
1) Save the index context in the FD att field for both short and
   FLE (non-SG) formats.
2) Identify the FD type from the att field of the FD.
3) Force 48-bit addresses for the source address and FLE, according
   to the spec.
A sketch of the assumed att field layout is given below.
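
The following is an illustrative sketch, not part of the patch, of how
the 16-bit att word is assumed to be split between the FD type and the
job index (mirroring the DPAA2_QDMA_FD_ATT_* macros added below):

#include <stdint.h>

/* bits 15..13 carry the FD type, bits 12..0 carry the job index */
#define FD_ATT_TYPE_OFFSET 13

static inline uint16_t fd_att_encode(uint16_t job_idx, uint16_t type)
{
	/* job_idx is assumed to fit in 13 bits */
	return (uint16_t)(job_idx | (type << FD_ATT_TYPE_OFFSET));
}

static inline uint16_t fd_att_type(uint16_t att)
{
	return att >> FD_ATT_TYPE_OFFSET;
}

static inline uint16_t fd_att_idx(uint16_t att)
{
	return att & ((1 << FD_ATT_TYPE_OFFSET) - 1);
}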

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 3 files changed, 285 insertions(+), 111 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 53caccecd7..d1358b686c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -522,7 +522,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -545,11 +544,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -560,11 +556,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -582,6 +573,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -644,7 +637,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -680,6 +673,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -696,74 +690,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
-
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -771,15 +869,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -799,10 +971,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -901,25 +1069,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -944,8 +1095,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1178,11 +1331,18 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	/** Default Enable Short FD for nonSG format.
+	 * Short FD has higher perf than FLE.
+	 */
+	env = getenv("DPAA2_QDMA_USING_SHORT_FD");
+	if (env)
+		qdma_dev->vqs[vchan].using_short_fd = atoi(env);
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1202,7 +1362,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 10/30] dma/dpaa2: limit the max descriptor number
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (8 preceding siblings ...)
  2024-07-22 16:39     ` [v3 09/30] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                       ` (19 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD with a width of
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits.

The maximum descriptor number of the ring is a power of 2, so the
eventual maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
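
With the 13-bit att width this works out to (1 << 13) / 2 = 4096
descriptors, which is the value DPAA2_QDMA_MAX_DESC now evaluates to.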

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 11/30] dma/dpaa2: change the DMA copy return value
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (9 preceding siblings ...)
  2024-07-22 16:39     ` [v3 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                       ` (18 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

On success, the return value of the DMA copy/SG-copy operations should
be the index of the copied descriptor.
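
A minimal usage sketch from the application side, through the generic
dmadev API (dev_id, vchan, src_iova, dst_iova and len are assumed to
be set up already; this is illustrative, not taken from the patch):

#include <stdbool.h>
#include <rte_dmadev.h>

static int copy_one(int16_t dev_id, uint16_t vchan,
		    rte_iova_t src_iova, rte_iova_t dst_iova, uint32_t len)
{
	uint16_t last_idx, nb_done;
	bool has_error;
	int idx;

	/* enqueue one copy and submit it immediately */
	idx = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
			   RTE_DMA_OP_FLAG_SUBMIT);
	if (idx < 0)
		return idx; /* enqueue failed */

	/* poll completions; last_idx reports the last completed index */
	do {
		nb_done = rte_dma_completed(dev_id, vchan, 1, &last_idx,
					    &has_error);
	} while (nb_done == 0 && !has_error);

	return has_error ? -1 : 0;
}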

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index d1358b686c..b70750fede 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -605,6 +605,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -681,10 +686,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 12/30] dma/dpaa2: move the qdma header to common place
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (10 preceding siblings ...)
  2024-07-22 16:39     ` [v3 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 13/30] dma/dpaa: support multi channels Gagandeep Singh
                       ` (17 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change the code accordingly; the rename is sketched below.
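
For users of the driver-specific flag helpers, the change amounts to
the new header plus the renamed macros. A hedged before/after sketch
(assuming an application that encodes the copy index into the flags):

/* before */
#include <rte_pmd_dpaa2_qdma.h>
flags = RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);

/* after */
#include <rte_pmd_dpaax_qdma.h>
flags = RTE_DPAAX_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);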

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9283154f8..ab42440733 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index b70750fede..19d8af9416 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -212,16 +212,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -235,23 +235,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -350,21 +348,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -444,17 +440,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -560,7 +555,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -610,9 +605,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -631,11 +626,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -667,8 +658,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1011,7 +1001,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1046,7 +1036,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1078,7 +1068,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1131,11 +1121,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1317,6 +1307,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char pool_name[64];
 	int ret;
 	char *env = NULL;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1359,6 +1350,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 13/30] dma/dpaa: support multi channels
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (11 preceding siblings ...)
  2024-07-22 16:39     ` [v3 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
                       ` (16 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena

This patch adds support for using multiple DMA channels in the driver;
a configuration sketch follows.
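
A hedged sketch of how an application could drive multiple virtual
channels through the generic dmadev API (dev_id, channel count and
descriptor count are illustrative assumptions, not values mandated by
this patch):

#include <rte_dmadev.h>

static int setup_multi_vchan(int16_t dev_id)
{
	struct rte_dma_conf dev_conf = { .nb_vchans = 4 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 64,
	};
	uint16_t vchan;

	if (rte_dma_configure(dev_id, &dev_conf) < 0)
		return -1;
	for (vchan = 0; vchan < dev_conf.nb_vchans; vchan++) {
		if (rte_dma_vchan_setup(dev_id, vchan, &vchan_conf) < 0)
			return -1;
	}
	return rte_dma_start(dev_id);
}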

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 10e65ef1d7..24ad7ad019 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -648,8 +648,8 @@ fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
 	}
 
 finally:
-	return fsl_qdma->desc_allocated++;
-
+	fsl_qdma->desc_allocated++;
+	return 0;
 exit:
 	return -ENOMEM;
 }
@@ -670,7 +670,7 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 			     RTE_DMA_CAPA_DEV_TO_MEM |
 			     RTE_DMA_CAPA_SILENT |
 			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
+	dev_info->max_vchans = 4;
 	dev_info->max_desc = DPAADMA_MAX_DESC;
 	dev_info->min_desc = DPAADMA_MIN_DESC;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 14/30] dma/dpaa: fix job enqueue
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (12 preceding siblings ...)
  2024-07-22 16:39     ` [v3 13/30] dma/dpaa: support multi channels Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
                       ` (15 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: stable

The check should be a bitwise AND instead of an equality comparison,
because the flags argument may carry bits other than
RTE_DMA_OP_FLAG_SUBMIT.
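
For illustration (an assumed caller, not taken from this patch): if an
application combines flags, e.g.

	uint64_t flags = RTE_DMA_OP_FLAG_SUBMIT | RTE_DMA_OP_FLAG_LLC;

then the old test (flags == RTE_DMA_OP_FLAG_SUBMIT) is false and the
hardware enqueue is never triggered, while
(flags & RTE_DMA_OP_FLAG_SUBMIT) still detects the submit request.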

Fixes: 7da29a644c51 ("dma/dpaa: support DMA operations")
Cc: stable@dpdk.org

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 24ad7ad019..0a91cf040a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -615,7 +615,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 
 	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
 		reg |= FSL_QDMA_BCQMR_EI_BE;
 		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 15/30] dma/dpaa: add burst capacity API
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (13 preceding siblings ...)
  2024-07-22 16:39     ` [v3 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                       ` (14 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Vanshika Shukla

From: Vanshika Shukla <vanshika.shukla@nxp.com>

This patch improves the dpaa qdma driver and adds the
dpaa_qdma_burst_capacity API, which returns the remaining space
in the descriptor ring.
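
Applications reach this callback through the generic
rte_dma_burst_capacity() helper. A hedged sketch of using it to
throttle enqueues (dev_id, vchan, src, dst, len and nb_jobs are
assumed to be set up by the caller):

#include <rte_dmadev.h>

static void enqueue_up_to_capacity(int16_t dev_id, uint16_t vchan,
				   rte_iova_t src, rte_iova_t dst,
				   uint32_t len, uint16_t nb_jobs)
{
	uint16_t room = rte_dma_burst_capacity(dev_id, vchan);

	while (nb_jobs > 0 && room > 0) {
		/* enqueue without submitting; ring the doorbell once */
		if (rte_dma_copy(dev_id, vchan, src, dst, len, 0) < 0)
			break;
		nb_jobs--;
		room--;
	}
	rte_dma_submit(dev_id, vchan);
}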

Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 34 +++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h |  3 +--
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 0a91cf040a..bb6b54e583 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -423,7 +423,6 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 static int
 fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
 				 enum rte_dma_status_code *status)
 {
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
@@ -457,7 +456,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
 			fsl_status->virt_head = fsl_status->cq;
 		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -607,7 +605,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
 	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
 	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
 	fsl_queue->virt_head++;
 
 	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
@@ -623,7 +620,7 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	} else {
 		fsl_queue->pending++;
 	}
-	return fsl_comp->index;
+	return 0;
 }
 
 static int
@@ -771,8 +768,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
 	struct fsl_qdma_chan *fsl_chan =
 		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	int ret;
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	int ret, idx;
 
+	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
 	void *fsl_comp = NULL;
 
 	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
@@ -783,8 +782,10 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		return -1;
 	}
 	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	return idx;
 }
 
 static uint16_t
@@ -826,8 +827,10 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
+						st);
 	fsl_queue->stats.completed += intr;
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
 	return intr;
 }
@@ -873,9 +876,10 @@ dpaa_qdma_dequeue(void *dev_private,
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
+						NULL);
 	fsl_queue->stats.completed += intr;
-
+	if (last_idx != NULL)
+		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 	return intr;
 }
 
@@ -912,6 +916,17 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
+	struct fsl_qdma_chan *fsl_chan =
+		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+
+	return fsl_queue->n_cq - fsl_queue->pending;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1035,6 +1050,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..2092fb39f5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -176,7 +176,6 @@ struct fsl_qdma_comp {
 	dma_addr_t		bus_addr;
 	dma_addr_t		desc_bus_addr;
 	void			*virt_addr;
-	int			index;
 	void			*desc_virt_addr;
 	struct fsl_qdma_chan	*qchan;
 	dma_call_back		call_back_func;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 16/30] dma/dpaa: add workaround for ERR050757
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (14 preceding siblings ...)
  2024-07-22 16:39     ` [v3 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                       ` (13 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes. The completion buffer inside the
controller would need to be at least 4 KB, but the PCIe controller
only has 3 KB of buffer. If the size of the pending outbound read
transactions exceeds 3 KB, the PCIe controller may drop the incoming
completions without notifying the initiator of the transaction,
leaving transactions unfinished. All subsequent outbound reads to
PCIe are then blocked permanently.
To avoid a qDMA hang while it keeps waiting for data that was
silently dropped, set stride mode for qDMA.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 18 ++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h |  5 +++++
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index bb6b54e583..a21279293c 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -159,6 +159,10 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 				      dma_addr_t dst, dma_addr_t src, u32 len)
 {
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* Note: command table (fsl_comp->virt_addr) is getting filled
 	 * directly in cmd descriptors of queues while enqueuing the descriptor
@@ -171,6 +175,20 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
 	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+
 	/* Status notification is enqueued to status queue. */
 	qdma_desc_addr_set64(csgf_src, src);
 	qdma_csgf_set_len(csgf_src, len);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 2092fb39f5..361f88856b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -81,6 +81,11 @@
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
+#define FSL_QDMA_CMD_SSEN		BIT(19)
+#define FSL_QDMA_CFG_SSS_OFFSET		12
+#define FSL_QDMA_CMD_SSS_STRIDE		128
+#define FSL_QDMA_CMD_SSS_DISTANCE	128
+
 #define QDMA_CCDF_STATUS		20
 #define QDMA_CCDF_OFFSET		20
 #define QDMA_CCDF_MASK			GENMASK(28, 20)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 17/30] dma/dpaa: qdma stall workaround for ERR050265
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (15 preceding siblings ...)
  2024-07-22 16:39     ` [v3 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
                       ` (12 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers, in the
form of fewer transactions, and should be used whenever possible.
This patch also fixes a qDMA stall issue caused by unaligned
transactions.
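
As a minimal sketch (an assumption for illustration, not code from
the patch), the workaround simply ORs the prefetch enable bit into
the source descriptor command word; FSL_QDMA_CMD_PF mirrors the
BIT(17) define added below.

#include <stdint.h>

#define BIT(n)			(1u << (n))
#define FSL_QDMA_CMD_PF		BIT(17)	/* prefetchable source read */

static inline uint32_t
qdma_sdf_cmd_with_prefetch(uint32_t cmd)
{
	/* request prefetchable reads; suitable for local memory sources */
	return cmd | FSL_QDMA_CMD_PF;
}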

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 6 ++++++
 drivers/dma/dpaa/dpaa_qdma.h | 1 +
 4 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a21279293c..f1878879af 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -179,6 +179,9 @@ fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
 	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 				FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
 		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
 		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
@@ -247,6 +250,9 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
 		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 361f88856b..8cb4042bd0 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -80,6 +80,7 @@
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_PF			BIT(17)
 
 #define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 18/30] dma/dpaa: remove unwanted desc
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (16 preceding siblings ...)
  2024-07-22 16:39     ` [v3 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 19/30] dma/dpaa: data path optimization Gagandeep Singh
                       ` (11 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove the unwanted descriptor list maintenance and the
per-channel overhead.
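
A simplified sketch of the idea, under the assumption (names are
hypothetical) that each queue keeps pre-allocated descriptors in a
flat array and reuses them through a wrapping index instead of
moving entries between free/used lists:

struct sketch_queue {
	void *desc[64];		/* pre-allocated command descriptors */
	unsigned int ci;	/* current index into the ring */
	unsigned int n_cq;	/* ring size */
};

static void *
sketch_next_desc(struct sketch_queue *q)
{
	void *d = q->desc[q->ci++];

	if (q->ci == q->n_cq)	/* wrap instead of list_del()/list_add() */
		q->ci = 0;
	return d;
}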

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 594 +++++++++++++----------------------
 drivers/dma/dpaa/dpaa_qdma.h |  43 +--
 2 files changed, 221 insertions(+), 416 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1878879af..8e8426b88d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -111,96 +111,6 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
-#endif
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-				FSL_QDMA_CMD_RWTTYPE_OFFSET);
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
-#endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
-#endif
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
@@ -209,42 +119,41 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 					struct fsl_qdma_queue *queue,
 					int size, int aligned)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_format *ccdf;
+	int i, j;
+	struct fsl_qdma_format *head;
+
+	head = queue->virt_head;
+
+	for (i = 0; i < (int)(queue->n_cq); i++) {
+		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+
+		queue->virt_addr[i] =
+		dma_pool_alloc(size, aligned, &bus_addr);
+		if (!queue->virt_addr[i])
 			goto fail;
-		}
 
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		queue->desc_virt_addr[i] =
+		dma_pool_alloc(size, aligned, &desc_bus_addr);
+		if (!queue->desc_virt_addr[i]) {
+			rte_free(queue->virt_addr[i]);
 			goto fail;
 		}
 
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
+		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+		memset(queue->desc_virt_addr[i], 0,
 		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
 
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
+			    QDMA_DESC_OFF;
+		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
+		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+
 		/* It must be 32 as Compound S/G Descriptor */
 		qdma_csgf_set_len(csgf_desc, 32);
 		/* Descriptor Buffer */
@@ -258,106 +167,84 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		ccdf = (struct fsl_qdma_format *)queue->virt_head;
+		qdma_desc_addr_set64(ccdf, bus_addr + 16);
+		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
+		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
+		queue->virt_head++;
 	}
+	queue->virt_head = head;
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
+	for (j = 0; j < i; j++) {
+		rte_free(queue->virt_addr[j]);
+		rte_free(queue->desc_virt_addr[j]);
 	}
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static struct fsl_qdma_queue
+*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *queue_temp;
+
+	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	if (!queue_temp) {
+		printf("no memory to allocate queues\n");
+		return NULL;
 	}
 
-	return NULL;
-}
+	queue_temp->cq =
+	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE,
+		       sizeof(struct fsl_qdma_format) *
+		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
+	if (!queue_temp->cq) {
+		rte_free(queue_temp);
 		return NULL;
-
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
-
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.\n");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
 	}
-	return queue_head;
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
+	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
+	       sizeof(struct fsl_qdma_format));
+
+	queue_temp->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->n_cq = QDMA_QUEUE_SIZE;
+	queue_temp->id = k;
+	queue_temp->pending = 0;
+	queue_temp->virt_head = queue_temp->cq;
+	queue_temp->virt_addr = rte_malloc("queue virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->virt_addr) {
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
 	}
-	rte_free(queue_head);
+	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!queue_temp->desc_virt_addr) {
+		rte_free(queue_temp->virt_addr);
+		rte_free(queue_temp->cq);
+		rte_free(queue_temp);
+		return NULL;
+	}
+	queue_temp->stats = (struct rte_dma_stats){0};
+
+	return queue_temp;
+}
 
-	return NULL;
+static void
+fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->desc_virt_addr);
+	rte_free(queue->virt_addr);
+	rte_free(queue->cq);
+	rte_free(queue);
 }
 
 static struct
@@ -367,11 +254,6 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	unsigned int status_size;
 
 	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.\n");
-		return NULL;
-	}
 
 	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
 	if (!status_head)
@@ -399,6 +281,13 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	return status_head;
 }
 
+static void
+fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
+{
+	rte_free(status->cq);
+	rte_free(status);
+}
+
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -449,12 +338,9 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 				 void *block, int id, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
 	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
+	u32 reg;
 	int count = 0;
 
 	while (count < nb_cpls) {
@@ -464,14 +350,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 
 		status_addr = fsl_status->virt_head;
 
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
 		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
 		reg |= FSL_QDMA_BSQMR_DI_BE;
 
@@ -483,7 +361,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
 		count++;
 
 	}
@@ -493,7 +370,6 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
@@ -508,11 +384,13 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
+	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		k = 0;
+		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
+			temp = fsl_qdma->queue[i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -522,19 +400,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(i));
+				    block + FSL_QDMA_BCQDPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(i));
+				    block + FSL_QDMA_BCQEDPA_SADDR(k));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEPA_SADDR(k));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(i));
+				    block + FSL_QDMA_BCQEEPA_SADDR(k));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
 			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			k++;
 		}
 
 		/*
@@ -585,36 +464,19 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
-{
-	struct fsl_qdma_comp *fsl_comp;
-
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
-
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
-
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
-}
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
+				  uint64_t flags, dma_addr_t dst,
+				  dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
+	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_format *csgf_src, *csgf_dest;
 	u32 reg;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+	u32 cfg = 0;
+#endif
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
@@ -624,17 +486,40 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
 		return -1;
 
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
+	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		   QDMA_SGF_SRC_OFF;
+	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
+		    QDMA_SGF_DST_OFF;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+#endif
+	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
+		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
+					FSL_QDMA_CFG_SSS_OFFSET |
+					FSL_QDMA_CMD_SSS_DISTANCE);
+		sdf->cfg = cfg;
+	} else
+		sdf->cfg = 0;
+#endif
+	qdma_desc_addr_set64(csgf_src, src);
+	qdma_csgf_set_len(csgf_src, len);
+	qdma_desc_addr_set64(csgf_dest, dst);
+	qdma_csgf_set_len(csgf_dest, len);
+	/* This entry is the last entry. */
+	qdma_csgf_set_f(csgf_dest, len);
 	fsl_queue->virt_head++;
+	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
+	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
 		fsl_queue->virt_head = fsl_queue->cq;
+		fsl_queue->ci = 0;
+	}
 
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
@@ -647,34 +532,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
 	return 0;
 }
 
-static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	int ret;
-
-	if (fsl_queue->count++)
-		goto finally;
-
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
-
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor\n");
-		goto exit;
-	}
-
-finally:
-	fsl_qdma->desc_allocated++;
-	return 0;
-exit:
-	return -ENOMEM;
-}
-
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	      uint32_t info_sz)
@@ -701,35 +558,26 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
 {
-	u32 i, start, end;
+	u32 i;
 	int ret;
+	struct fsl_qdma_queue *fsl_queue;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
-
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
-
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
-		}
+	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
+		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
+		return -1;
 	}
 
-	return -1;
-}
+	i = fsl_qdma->free_block_id * QDMA_QUEUES;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	fsl_queue = fsl_qdma->queue[i];
+	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
+			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	if (ret)
+		return ret;
+
+	fsl_qdma->vchan_map[vchan] = i;
+	fsl_qdma->free_block_id++;
+	return 0;
 }
 
 static int
@@ -767,10 +615,9 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *block = fsl_queue->queue_base;
 	u32 reg;
 
 	while (fsl_queue->pending) {
@@ -790,22 +637,13 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 		  uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	int ret, idx;
 
 	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
-	void *fsl_comp = NULL;
 
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n");
-		return -1;
-	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
 	if (ret < 0)
 		return ret;
 
@@ -822,9 +660,8 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -870,9 +707,8 @@ dpaa_qdma_dequeue(void *dev_private,
 	void *block;
 	int intr;
 	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 	if (intr) {
@@ -912,9 +748,8 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
 		    struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -931,9 +766,8 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	fsl_queue->stats = (struct rte_dma_stats){0};
 
@@ -944,9 +778,8 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue =
+		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -965,43 +798,21 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	u32 i, k = 0;
+	int j;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return -1;
 	}
 
 	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
@@ -1014,39 +825,55 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (fsl_qdma->ctrl_base == MAP_FAILED) {
 		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
 		       "size %d\n", phys_addr, regs_size);
-		goto err;
+		return -1;
 	}
 
 	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
 	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
 
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
+	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
+	if (!fsl_qdma->status)
+		goto err;
+
+	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
 	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
+		rte_free(fsl_qdma->status);
 		goto err;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		if (!fsl_qdma->status[i])
+			goto mem_free;
+		j = 0;
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
+			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k])
+				goto mem_free;
+			j++;
+		}
 
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		rte_free(fsl_qdma->status);
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	}
 	rte_free(fsl_qdma->status);
+err:
+	rte_free(fsl_qdma->queue);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return -1;
 }
@@ -1092,17 +919,16 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, k;
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
+			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
 	}
 
+	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8cb4042bd0..80366ce890 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,6 +107,9 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_SGF_SRC_OFF		2
+#define QDMA_SGF_DST_OFF		3
+#define QDMA_DESC_OFF			1
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -157,55 +160,31 @@ struct fsl_qdma_ddf {
 	__le32 cmd;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
-};
-
 struct fsl_qdma_queue {
 	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
+	void                    **virt_addr;
+	u8			ci;
+	u8			n_cq;
+	u8			id;
+	void			*queue_base;
 	struct fsl_qdma_format	*cq;
-	void			*block_base;
 	struct rte_dma_stats	stats;
-};
-
-struct fsl_qdma_comp {
+	u8			pending;
 	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+	void			**desc_virt_addr;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
 	void			*ctrl_base;
 	void			*status_base;
 	void			*block_base;
-	u32			n_chans;
 	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
+	struct fsl_qdma_queue	**queue;
 	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
 	u32			num_blocks;
 	u8			free_block_id;
 	u32			vchan_map[4];
 	int			block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 19/30] dma/dpaa: data path optimization
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (17 preceding siblings ...)
  2024-07-22 16:39     ` [v3 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 20/30] dma/dpaa: refactor driver Gagandeep Singh
                       ` (10 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena

Remove the unnecessary status register read before every send.
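
A small sketch (assumption, not lifted from the patch) of the
software check that replaces the per-enqueue status register read:
with a single queue per block, room in the queue can be derived
from the driver's own submit/complete counters against the
critical watermark added in dpaa_qdma.h.

#include <stdint.h>
#include <stdbool.h>

#define QDMA_QUEUE_CR_WM	32	/* critical watermark */

static bool
queue_has_room(uint64_t submitted, uint64_t completed)
{
	/* in-flight jobs must stay below the watermark */
	return (submitted - completed) < QDMA_QUEUE_CR_WM;
}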

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 186 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 ++
 2 files changed, 101 insertions(+), 92 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8e8426b88d..4022ad6469 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -248,7 +248,8 @@ fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 }
 
 static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+					   u32 id)
 {
 	struct fsl_qdma_queue *status_head;
 	unsigned int status_size;
@@ -277,6 +278,8 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
 	       sizeof(struct fsl_qdma_format));
 	status_head->n_cq = status_size;
 	status_head->virt_head = status_head->cq;
+	status_head->queue_base = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
 	return status_head;
 }
@@ -334,12 +337,9 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
+fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 				 enum rte_dma_status_code *status)
 {
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_format *status_addr;
 	u32 reg;
 	int count = 0;
 
@@ -348,16 +348,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
 			return count;
 
-		status_addr = fsl_status->virt_head;
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
 
@@ -472,19 +463,37 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 {
 	void *block = fsl_queue->queue_base;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
-	u32 reg;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 	u32 cfg = 0;
 #endif
 
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	u32 reg;
+
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
+	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
+		DPAA_QDMA_ERR("QDMA Engine is busy\n");
 		return -1;
+	}
+#else
+	/* check whether critical watermark level reached,
+	 * below check is valid for only single queue per block
+	 */
+	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
+			>= QDMA_QUEUE_CR_WM) {
+		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+		return -1;
+	}
+#endif
+	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
+		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
+		return -1;
+	}
 
 	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
 		   QDMA_SGF_SRC_OFF;
@@ -512,19 +521,14 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 	qdma_csgf_set_len(csgf_dest, len);
 	/* This entry is the last entry. */
 	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->virt_head++;
 	fsl_queue->ci++;
 
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) {
-		fsl_queue->virt_head = fsl_queue->cq;
+	if (fsl_queue->ci == fsl_queue->n_cq)
 		fsl_queue->ci = 0;
-	}
-
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI,
+			       block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
@@ -618,12 +622,9 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
 	void *block = fsl_queue->queue_base;
-	u32 reg;
 
 	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -656,44 +657,43 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			 enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	void *status = fsl_qdma->status_base;
+	int intr;
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, st);
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+#endif
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			fsl_queue->stats.errors++;
+		}
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						st);
-	fsl_queue->stats.completed += intr;
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
 
-	return intr;
+	return ret;
 }
 
 
@@ -703,44 +703,46 @@ dpaa_qdma_dequeue(void *dev_private,
 		  uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
+	int ret;
 	struct fsl_qdma_queue *fsl_queue =
 		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	void *status = fsl_qdma->status_base;
+	int intr;
+#endif
 
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
+	*has_error = false;
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
+					       nb_cpls, NULL);
+#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
+	if (!ret) {
+		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
+		if (intr) {
+			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
+			intr = qdma_readl(status + FSL_QDMA_DECBR);
+			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
+			qdma_writel_be(0xbf,
+				    status + FSL_QDMA_DEDR);
+			intr = qdma_readl(status + FSL_QDMA_DEDR);
+			*has_error = true;
+			fsl_queue->stats.errors++;
+		}
 	}
-
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						NULL);
-	fsl_queue->stats.completed += intr;
+#endif
+	fsl_queue->stats.completed += ret;
 	if (last_idx != NULL)
 		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
-	return intr;
+	return ret;
 }
 
 static int
@@ -842,7 +844,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	}
 
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
 		if (!fsl_qdma->status[i])
 			goto mem_free;
 		j = 0;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 80366ce890..8a4517a70a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -58,11 +58,17 @@
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
+/* Update the value appropriately whenever QDMA_QUEUE_SIZE
+ * changes.
+ */
+#define FSL_QDMA_BCQMR_EI		0x20c0
+
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
 #define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -110,6 +116,7 @@
 #define QDMA_SGF_SRC_OFF		2
 #define QDMA_SGF_DST_OFF		3
 #define QDMA_DESC_OFF			1
+#define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 20/30] dma/dpaa: refactor driver
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (18 preceding siblings ...)
  2024-07-22 16:39     ` [v3 19/30] dma/dpaa: data path optimization Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
                       ` (9 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Return the completed descriptor index instead of the total
completion counter in the completion callback.
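
A minimal sketch (hypothetical helper, not from the patch) of the
wrapping per-queue completion index this rework reports, which
stays within [0, n_cq) instead of growing without bound:

#include <stdint.h>

static uint16_t
advance_complete_index(uint16_t complete, uint16_t n_cq)
{
	if (++complete >= n_cq)	/* wrap at the command queue size */
		complete = 0;
	return complete;
}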

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 534 ++++++++++++++++++-----------------
 drivers/dma/dpaa/dpaa_qdma.h | 109 +++----
 2 files changed, 330 insertions(+), 313 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 4022ad6469..dc17aa4520 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -15,19 +15,6 @@ qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
-{
-	return ccdf->cfg8b_w1 & 0xff;
-}
-
-static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
-
 static inline void
 qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
 {
@@ -59,8 +46,7 @@ qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
 	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
 }
 
-static inline int
-ilog2(int x)
+static inline int ilog2(int x)
 {
 	int log = 0;
 
@@ -73,32 +59,50 @@ ilog2(int x)
 	return log;
 }
 
-static u32
+static inline int ilog2_qsize(uint32_t q_size)
+{
+	return (ilog2(q_size) - ilog2(64));
+}
+
+static inline int ilog2_qthld(uint32_t q_thld)
+{
+	return (ilog2(q_thld) - ilog2(16));
+}
+
+static inline int
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
+{
+	struct rte_dma_stats *stats = &fsl_queue->stats;
+
+	return (stats->submitted - stats->completed);
+}
+
+static uint32_t
 qdma_readl(void *addr)
 {
 	return QDMA_IN(addr);
 }
 
 static void
-qdma_writel(u32 val, void *addr)
+qdma_writel(uint32_t val, void *addr)
 {
 	QDMA_OUT(addr, val);
 }
 
-static u32
+static uint32_t
 qdma_readl_be(void *addr)
 {
 	return QDMA_IN_BE(addr);
 }
 
 static void
-qdma_writel_be(u32 val, void *addr)
+qdma_writel_be(uint32_t val, void *addr)
 {
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
@@ -115,47 +119,48 @@ static void
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
 	struct fsl_qdma_format *ccdf;
-	int i, j;
+	uint16_t i, j;
 	struct fsl_qdma_format *head;
+	struct fsl_qdma_cmpd_ft *ft;
+	struct fsl_qdma_df *df;
 
-	head = queue->virt_head;
+	head = queue->cmd_desc;
 
-	for (i = 0; i < (int)(queue->n_cq); i++) {
-		dma_addr_t bus_addr = 0, desc_bus_addr = 0;
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->virt_addr[i] =
-		dma_pool_alloc(size, aligned, &bus_addr);
-		if (!queue->virt_addr[i])
+		queue->ft[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
+				RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
 			goto fail;
 
-		queue->desc_virt_addr[i] =
-		dma_pool_alloc(size, aligned, &desc_bus_addr);
-		if (!queue->desc_virt_addr[i]) {
-			rte_free(queue->virt_addr[i]);
+		queue->df[i] =
+			dma_pool_alloc(sizeof(struct fsl_qdma_df),
+				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (!queue->df[i]) {
+			rte_free(queue->ft[i]);
 			goto fail;
 		}
 
-		memset(queue->virt_addr[i], 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(queue->desc_virt_addr[i], 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
+		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
 
-		csgf_desc = (struct fsl_qdma_format *)queue->virt_addr[i] +
-			    QDMA_DESC_OFF;
-		sdf = (struct fsl_qdma_sdf *)queue->desc_virt_addr[i];
-		ddf = (struct fsl_qdma_ddf *)sdf + QDMA_DESC_OFF;
+		ft = queue->ft[i];
+		df = queue->df[i];
+		sdf = &df->sdf;
+		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, desc_bus_addr);
+		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
 
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		qdma_csgf_set_len(&ft->desc_buf,
+			sizeof(struct fsl_qdma_df));
 		/* Descriptor Buffer */
 		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
@@ -167,73 +172,72 @@ fsl_qdma_pre_request_enqueue_comp_sd_desc(
 		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
 				FSL_QDMA_CMD_LWC_OFFSET);
 
-		ccdf = (struct fsl_qdma_format *)queue->virt_head;
-		qdma_desc_addr_set64(ccdf, bus_addr + 16);
-		qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(queue->virt_addr[i]));
-		qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(queue->virt_addr[i]));
-		queue->virt_head++;
+		ccdf = queue->cmd_desc;
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		qdma_ccdf_set_format(ccdf, 0);
+		qdma_ccdf_set_ser(ccdf, 0);
+		queue->cmd_desc++;
 	}
-	queue->virt_head = head;
+	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
 
 fail:
 	for (j = 0; j < i; j++) {
-		rte_free(queue->virt_addr[j]);
-		rte_free(queue->desc_virt_addr[j]);
+		rte_free(queue->ft[j]);
+		rte_free(queue->df[j]);
 	}
 
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma, int k, int b)
+static struct fsl_qdma_queue *
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
 	struct fsl_qdma_queue *queue_temp;
+	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head", sizeof(*queue_temp), 0);
+	queue_temp = rte_zmalloc("qdma: queue head",
+		sizeof(struct fsl_qdma_queue), 0);
 	if (!queue_temp) {
-		printf("no memory to allocate queues\n");
+		DPAA_QDMA_ERR("no memory to allocate queues\n");
 		return NULL;
 	}
+	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq =
-	dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE,
-		       sizeof(struct fsl_qdma_format) *
-		       QDMA_QUEUE_SIZE, &queue_temp->bus_addr);
-
+	queue_temp->cq = dma_pool_alloc(queue_size,
+		queue_size, &queue_temp->bus_addr);
 	if (!queue_temp->cq) {
 		rte_free(queue_temp);
 		return NULL;
 	}
 
-	memset(queue_temp->cq, 0x0, QDMA_QUEUE_SIZE *
-	       sizeof(struct fsl_qdma_format));
+	memset(queue_temp->cq, 0x0, queue_size);
 
-	queue_temp->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, b);
+	queue_temp->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->id = k;
+	queue_temp->queue_id = queue_id;
 	queue_temp->pending = 0;
-	queue_temp->virt_head = queue_temp->cq;
-	queue_temp->virt_addr = rte_malloc("queue virt addr",
+	queue_temp->cmd_desc = queue_temp->cq;
+	queue_temp->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->virt_addr) {
+	if (!queue_temp->ft) {
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->desc_virt_addr = rte_malloc("queue desc virt addr",
+	queue_temp->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->desc_virt_addr) {
-		rte_free(queue_temp->virt_addr);
+	if (!queue_temp->df) {
+		rte_free(queue_temp->ft);
 		rte_free(queue_temp->cq);
 		rte_free(queue_temp);
 		return NULL;
 	}
-	queue_temp->stats = (struct rte_dma_stats){0};
+	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
 
 	return queue_temp;
 }
@@ -241,45 +245,43 @@ static struct fsl_qdma_queue
 static void
 fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->desc_virt_addr);
-	rte_free(queue->virt_addr);
+	rte_free(queue->df);
+	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
-					   u32 id)
+static struct fsl_qdma_queue *
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
 	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
+	status_head = rte_zmalloc("qdma: status head",
+		sizeof(*status_head), 0);
 	if (!status_head)
 		return NULL;
 
 	/*
 	 * Buffer for queue command
 	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
+	status_head->cq = dma_pool_alloc(status_size,
+		status_size, &status_head->bus_addr);
 
 	if (!status_head->cq) {
 		rte_free(status_head);
 		return NULL;
 	}
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
+	memset(status_head->cq, 0x0, status_size);
 	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
-	status_head->queue_base = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
+	status_head->cmd_desc = status_head->cq;
+	status_head->block_id = block_id;
+	status_head->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
 	return status_head;
 }
@@ -294,11 +296,11 @@ fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
 static int
 fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 {
-	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
+	uint8_t *ctrl = fsl_qdma->ctrl_base;
+	uint8_t *block;
 	int i, count = RETRIES;
 	unsigned int j;
-	u32 reg;
+	uint32_t reg;
 
 	/* Disable the command queue and wait for idle state. */
 	reg = qdma_readl(ctrl + FSL_QDMA_DMR);
@@ -337,11 +339,13 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
-				 enum rte_dma_status_code *status)
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls,
+	enum rte_dma_status_code *status)
 {
-	u32 reg;
+	uint32_t reg;
 	int count = 0;
+	uint8_t *block = fsl_queue->block_vir;
 
 	while (count < nb_cpls) {
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
@@ -351,9 +355,11 @@ fsl_qdma_queue_transfer_complete(void *block, const uint16_t nb_cpls,
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
 		if (status != NULL)
 			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
+		fsl_queue->complete++;
+		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
+			fsl_queue->complete = 0;
 
 		count++;
-
 	}
 	return count;
 }
@@ -363,9 +369,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
 	void *ctrl = fsl_qdma->ctrl_base;
-	void *block;
-	u32 i, j;
-	u32 reg;
+	uint8_t *block;
+	uint32_t i, j, k = 0;
+	uint32_t reg;
 	int ret, val;
 
 	/* Try to halt the qDMA engine first. */
@@ -375,13 +381,11 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		return ret;
 	}
 
-	int k = 0;
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		k = 0;
-		for (i = (j * QDMA_QUEUES); i < ((j * QDMA_QUEUES) + QDMA_QUEUES); i++) {
-			temp = fsl_qdma->queue[i];
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = fsl_qdma->queue[k];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -391,19 +395,20 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			 */
 
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQDPA_SADDR(k));
+				    block + FSL_QDMA_BCQDPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEDPA_SADDR(k));
+				    block + FSL_QDMA_BCQEDPA_SADDR(i));
 			qdma_writel(lower_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEPA_SADDR(i));
 			qdma_writel(upper_32_bits(temp->bus_addr),
-				    block + FSL_QDMA_BCQEEPA_SADDR(k));
+				    block + FSL_QDMA_BCQEEPA_SADDR(i));
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
-			qdma_writel(reg, block + FSL_QDMA_BCQMR(k));
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
+			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 			k++;
 		}
 
@@ -423,18 +428,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp = fsl_qdma->status[j];
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -443,7 +445,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -455,52 +457,51 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-
 static int
 fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-				  uint64_t flags, dma_addr_t dst,
-				  dma_addr_t src, size_t len)
+	uint64_t flags, dma_addr_t dst,
+	dma_addr_t src, size_t len)
 {
-	void *block = fsl_queue->queue_base;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
-	u32 cfg = 0;
+	uint32_t cfg = 0;
 #endif
 
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	u32 reg;
+	uint32_t reg;
 
 	/* retrieve and store the register value in big endian
 	 * to avoid bits swap
 	 */
 	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
 		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -1;
+		return -EBUSY;
 	}
 #else
 	/* check whether critical watermark level reached,
 	 * below check is valid for only single queue per block
 	 */
-	if ((fsl_queue->stats.submitted - fsl_queue->stats.completed)
-			>= QDMA_QUEUE_CR_WM) {
+	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
 		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
-		return -1;
+		return -ENOSPC;
 	}
 #endif
+
 	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
 		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -1;
+		return -ENOSPC;
 	}
 
-	csgf_src = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		   QDMA_SGF_SRC_OFF;
-	csgf_dest = (struct fsl_qdma_format *)fsl_queue->virt_addr[fsl_queue->ci] +
-		    QDMA_SGF_DST_OFF;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = (struct fsl_qdma_sdf *)fsl_queue->desc_virt_addr[fsl_queue->ci];
+	sdf = fsl_queue->df[fsl_queue->ci];
 	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
 			FSL_QDMA_CMD_RWTTYPE_OFFSET);
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
@@ -527,67 +528,57 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->ci = 0;
 
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI,
-			       block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->stats.submitted++;
 	} else {
 		fsl_queue->pending++;
 	}
-	return 0;
+
+	if (fsl_queue->ci)
+		return fsl_queue->ci - 1;
+	else
+		return fsl_queue->n_cq;
 }
 
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+	__rte_unused uint32_t info_sz)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
-
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 4;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
 {
-	u32 i;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue;
-
-	if (fsl_qdma->free_block_id == QDMA_BLOCKS) {
-		DPAA_QDMA_ERR("Maximum 4 queues can be configured\n");
-		return -1;
-	}
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	i = fsl_qdma->free_block_id * QDMA_QUEUES;
+	if (fsl_queue->used)
+		return 0;
 
-	fsl_queue = fsl_qdma->queue[i];
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-			FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
 	if (ret)
 		return ret;
 
-	fsl_qdma->vchan_map[vchan] = i;
-	fsl_qdma->free_block_id++;
+	fsl_queue->used = 1;
+
 	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -606,9 +597,9 @@ dpaa_qdma_close(__rte_unused struct rte_dma_dev *dev)
 
 static int
 dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
-		      uint16_t vchan,
-		      __rte_unused const struct rte_dma_vchan_conf *conf,
-		      __rte_unused uint32_t conf_sz)
+	uint16_t vchan,
+	__rte_unused const struct rte_dma_vchan_conf *conf,
+	__rte_unused uint32_t conf_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
 
@@ -618,13 +609,13 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	void *block = fsl_queue->queue_base;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
-		qdma_writel_be(FSL_QDMA_BCQMR_EI, block + FSL_QDMA_BCQMR(fsl_queue->id));
+		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
 		fsl_queue->pending--;
 		fsl_queue->stats.submitted++;
 	}
@@ -634,37 +625,31 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
-	int ret, idx;
-
-	idx = (uint16_t)(fsl_queue->stats.submitted + fsl_queue->pending);
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, (dma_addr_t)dst, (dma_addr_t)src, length);
-	if (ret < 0)
-		return ret;
+	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
 
-	return idx;
+	return ret;
 }
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, st);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, st);
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -690,8 +675,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	}
 
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
 
 	return ret;
 }
@@ -699,21 +688,20 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue->queue_base,
-					       nb_cpls, NULL);
+	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+			nb_cpls, NULL);
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -740,23 +728,27 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 #endif
 	fsl_queue->stats.completed += ret;
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(fsl_queue->stats.completed - 1);
+	if (last_idx) {
+		if (unlikely(!fsl_queue->complete))
+			*last_idx = fsl_queue->n_cq - 1;
+		else
+			*last_idx = fsl_queue->complete - 1;
+	}
+
 	return ret;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
 		return -EINVAL;
-	if (rte_stats == NULL)
+	if (!rte_stats)
 		return -EINVAL;
 
 	*rte_stats = *stats;
@@ -768,10 +760,9 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
@@ -779,9 +770,8 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
-	const struct fsl_qdma_engine *fsl_qdma  = dev_private;
-	struct fsl_qdma_queue *fsl_queue =
-		fsl_qdma->queue[fsl_qdma->vchan_map[vchan]];
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -804,8 +794,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i, k = 0;
-	int j;
+	uint32_t i, j, k;
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
@@ -814,47 +803,63 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		return -1;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d\n", phys_addr, regs_size);
-		return -1;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIu64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->status = rte_malloc("status queue", sizeof(struct fsl_qdma_queue) * 4, 0);
-	if (!fsl_qdma->status)
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	fsl_qdma->status = rte_malloc("status queue",
+		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
+	if (!fsl_qdma->status) {
+		ret = -ENOMEM;
 		goto err;
+	}
 
-	fsl_qdma->queue = rte_malloc("cmd queue", sizeof(struct fsl_qdma_queue) * 32, 0);
+	fsl_qdma->queue = rte_malloc("cmd queue",
+		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
 	if (!fsl_qdma->queue) {
 		rte_free(fsl_qdma->status);
+		ret = -ENOMEM;
 		goto err;
 	}
 
+	k = 0;
 	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i])
+		fsl_qdma->status[i] =
+			fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (!fsl_qdma->status[i]) {
+			ret = -ENOMEM;
 			goto mem_free;
+		}
 		j = 0;
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++) {
-			fsl_qdma->queue[k] = fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k])
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_qdma->queue[k] =
+				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
+			if (!fsl_qdma->queue[k]) {
+				ret = -ENOMEM;
 				goto mem_free;
-			j++;
+			}
+			k++;
 		}
-
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
@@ -867,17 +872,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	return 0;
 
 mem_free:
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
-	rte_free(fsl_qdma->status);
+	for (j = 0; j < k; j++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+
+	for (j = 0; j < i; j++)
+		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+
+	if (fsl_qdma->status)
+		rte_free(fsl_qdma->status);
 err:
-	rte_free(fsl_qdma->queue);
+	if (fsl_qdma->queue)
+		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
-	return -1;
+	return ret;
 }
 
 static int
@@ -921,13 +929,13 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i, k;
+	uint32_t i;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		for (k = (i * QDMA_QUEUES); k < ((i * QDMA_QUEUES) + QDMA_QUEUES); k++)
-			fsl_qdma_free_queue_resources(fsl_qdma->queue[k]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
 		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
-	}
+
+	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
+		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
 
 	rte_free(fsl_qdma->queue);
 	rte_free(fsl_qdma->status);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 8a4517a70a..25954ef3a4 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,12 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,19 +59,18 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 /* Update the value appropriately whenever QDMA_QUEUE_SIZE
  * changes.
  */
-#define FSL_QDMA_BCQMR_EI		0x20c0
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
 #define FSL_QDMA_BSQMR_DI		0xc0
 
@@ -75,8 +79,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -106,16 +108,11 @@
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
+#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
 #define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
 #define QDMA_BLOCK_OFFSET		0x10000
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
-#define QDMA_SGF_SRC_OFF		2
-#define QDMA_SGF_DST_OFF		3
-#define QDMA_DESC_OFF			1
 #define QDMA_QUEUE_CR_WM		32
 
 #define QDMA_BIG_ENDIAN			1
@@ -134,64 +131,76 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
 struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
+	uint32_t status; /* ser, status */
+	uint32_t cfg;	/* format, offset */
 	union {
 		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
+			uint32_t addr_lo; /* low 32-bits of 40-bit address */
+			uint8_t addr_hi; /* high 8-bits of 40-bit address */
+			uint8_t __reserved1[2];
+			uint8_t cfg8b_w1; /* dd, queue */
 		};
-		__le64 data;
+		uint64_t data;
 	};
 };
 
 /* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
+	uint32_t rev3;
+	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+	uint32_t rev5;
+	uint32_t cmd;
 };
 
 /* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rev1;
+	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+	uint32_t rev3;
+	uint32_t cmd;
+};
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
+};
+
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_format desc_buf;
+	struct fsl_qdma_format desc_sbuf;
+	struct fsl_qdma_format desc_dbuf;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	void                    **virt_addr;
-	u8			ci;
-	u8			n_cq;
-	u8			id;
-	void			*queue_base;
-	struct fsl_qdma_format	*cq;
-	struct rte_dma_stats	stats;
-	u8			pending;
-	dma_addr_t		bus_addr;
-	void			**desc_virt_addr;
+	struct fsl_qdma_format *cmd_desc;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	uint16_t complete;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	uint8_t pending;
+	dma_addr_t bus_addr;
+	struct fsl_qdma_df **df;
 };
 
 struct fsl_qdma_engine {
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_queues;
-	struct fsl_qdma_queue	**queue;
-	struct fsl_qdma_queue	**status;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	struct fsl_qdma_queue **queue;
+	struct fsl_qdma_queue **status;
+	uint32_t num_blocks;
+	int block_offset;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 21/30] dma/dpaa: dequeue status queue
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (19 preceding siblings ...)
  2024-07-22 16:39     ` [v3 20/30] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
                       ` (8 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

To support multiple command queues per block, the status queue
needs to report which command queue of the block has completed.

The command queues are balanced across the blocks during setup.
If multiple command queues are enabled in one block,
the status queue is dequeued instead of only checking for completion.

No dequeue operation is performed in silent mode.
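
A simplified sketch of the idea, for illustration only (the actual
implementation is dpaa_qdma_block_dequeue in the diff below, and the
driver-internal types come from this patch): every status-queue entry
records the command queue it belongs to, so a single walk of a block's
status ring can credit completions to each command queue of that block.

	static uint16_t
	block_dequeue_sketch(struct fsl_qdma_engine *eng, uint8_t block_id)
	{
		struct fsl_qdma_status_queue *stat = &eng->stat_queues[block_id];
		uint16_t idx = stat->complete, count = 0;
		uint8_t qid;

		/* Walk entries written back by hardware until an empty one. */
		while (qdma_ccdf_get_queue(&stat->cq[idx], &qid)) {
			/* Credit the command queue identified by the entry. */
			eng->cmd_queues[block_id][qid].stats.completed++;
			if (++idx == stat->n_cq)
				idx = 0;
			count++;
		}
		stat->complete = idx;

		return count;
	}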

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 351 +++++++++++++++++++++--------------
 drivers/dma/dpaa/dpaa_qdma.h |  38 +++-
 2 files changed, 242 insertions(+), 147 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index dc17aa4520..825dead5cf 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -34,6 +34,30 @@ qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
 	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
 }
 
+static inline void
+qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
+	uint8_t queue_idx)
+{
+	ccdf->queue = queue_idx;
+}
+
+static inline int
+qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+	uint8_t *queue_idx)
+{
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
+
+	return false;
+}
+
 static inline void
 qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
 {
@@ -110,7 +134,8 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 	if (!virt_addr)
 		return NULL;
 
-	*phy_addr = rte_mem_virt2iova(virt_addr);
+	if (phy_addr)
+		*phy_addr = rte_mem_virt2iova(virt_addr);
 
 	return virt_addr;
 }
@@ -121,6 +146,7 @@ dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_format *ccdf;
@@ -175,7 +201,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = queue->cmd_desc;
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		qdma_ccdf_set_format(ccdf, 0);
-		qdma_ccdf_set_ser(ccdf, 0);
+		if (!fsl_qdma->is_silent)
+			qdma_ccdf_set_ser(ccdf, 0);
+		qdma_ccdf_set_queue(ccdf, queue->queue_id);
 		queue->cmd_desc++;
 	}
 	queue->cmd_desc = head;
@@ -192,105 +220,91 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return -ENOMEM;
 }
 
-static struct fsl_qdma_queue *
+static int
 fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue_temp;
+	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
 
-	queue_temp = rte_zmalloc("qdma: queue head",
-		sizeof(struct fsl_qdma_queue), 0);
-	if (!queue_temp) {
-		DPAA_QDMA_ERR("no memory to allocate queues\n");
-		return NULL;
-	}
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
 	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
 
-	queue_temp->cq = dma_pool_alloc(queue_size,
-		queue_size, &queue_temp->bus_addr);
-	if (!queue_temp->cq) {
-		rte_free(queue_temp);
-		return NULL;
-	}
+	cmd_queue->cq = dma_pool_alloc(queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq)
+		return -ENOMEM;
 
-	memset(queue_temp->cq, 0x0, queue_size);
+	memset(cmd_queue->cq, 0x0, queue_size);
 
-	queue_temp->block_vir = fsl_qdma->block_base +
+	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
-	queue_temp->n_cq = QDMA_QUEUE_SIZE;
-	queue_temp->queue_id = queue_id;
-	queue_temp->pending = 0;
-	queue_temp->cmd_desc = queue_temp->cq;
-	queue_temp->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending = 0;
+	cmd_queue->cmd_desc = cmd_queue->cq;
+	cmd_queue->ft = rte_malloc("Compound Frame Table",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->ft) {
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->ft) {
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	queue_temp->df = rte_malloc("Descriptor Buffer",
+	cmd_queue->df = rte_malloc("Descriptor Buffer",
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!queue_temp->df) {
-		rte_free(queue_temp->ft);
-		rte_free(queue_temp->cq);
-		rte_free(queue_temp);
-		return NULL;
+	if (!cmd_queue->df) {
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
 	}
-	memset(&queue_temp->stats, 0, sizeof(struct rte_dma_stats));
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
 
-	return queue_temp;
+	return 0;
 }
 
 static void
-fsl_qdma_free_queue_resources(struct fsl_qdma_queue *queue)
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
-	rte_free(queue);
 }
 
-static struct fsl_qdma_queue *
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
+}
+
+
+static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
+	struct fsl_qdma_status_queue *status;
 	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head",
-		sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(status_size,
-		status_size, &status_head->bus_addr);
+	status->cq = dma_pool_alloc(status_size,
+		status_size, &status->bus_addr);
 
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size);
-	status_head->n_cq = status_size;
-	status_head->cmd_desc = status_head->cq;
-	status_head->block_id = block_id;
-	status_head->block_vir = fsl_qdma->block_base +
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
-}
-
-static void
-fsl_qdma_free_status_queue(struct fsl_qdma_queue *status)
-{
-	rte_free(status->cq);
-	rte_free(status);
+	return 0;
 }
 
 static int
@@ -358,6 +372,7 @@ fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
 		fsl_queue->complete++;
 		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
 			fsl_queue->complete = 0;
+		fsl_queue->stats.completed++;
 
 		count++;
 	}
@@ -368,9 +383,10 @@ static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	uint8_t *block;
-	uint32_t i, j, k = 0;
+	uint32_t i, j;
 	uint32_t reg;
 	int ret, val;
 
@@ -385,7 +401,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
 		for (i = 0; i < QDMA_QUEUES; i++) {
-			temp = fsl_qdma->queue[k];
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -409,7 +425,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
 			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
-			k++;
 		}
 
 		/*
@@ -419,7 +434,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 */
 
 		qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
-			    block + FSL_QDMA_SQCCMR);
+			block + FSL_QDMA_SQCCMR);
 
 		/*
 		 * Initialize status queue registers to point to the first
@@ -428,14 +443,14 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		temp = fsl_qdma->status[j];
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEEPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEPAR);
-		qdma_writel(upper_32_bits(temp->bus_addr),
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQEDPAR);
-		qdma_writel(lower_32_bits(temp->bus_addr),
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
 			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
@@ -445,7 +460,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2_qsize(temp->n_cq);
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -560,8 +575,29 @@ static int
 dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 	uint16_t vchan)
 {
-	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
+		}
+	}
+
+queue_found:
+	if (!found)
+		return -ENXIO;
 
 	if (fsl_queue->used)
 		return 0;
@@ -571,15 +607,19 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 		return ret;
 
 	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
 
 	return 0;
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -610,7 +650,7 @@ static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *block = fsl_queue->block_vir;
 
 	while (fsl_queue->pending) {
@@ -629,7 +669,7 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint32_t length, uint64_t flags)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	int ret;
 
 	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
@@ -637,6 +677,42 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_format *cq;
+	uint16_t start, count = 0;
+	uint8_t qid;
+	int ret;
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	do {
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+			cmd_queue->complete++;
+			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
+				cmd_queue->complete = 0;
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
@@ -644,12 +720,22 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
 	int intr;
 
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, st);
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+		return 0;
+	}
+
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, st);
+	}
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
 		if (intr) {
@@ -674,7 +760,6 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 		}
 	}
 
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -693,15 +778,26 @@ dpaa_qdma_dequeue(void *dev_private,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	int ret;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
 	int intr;
 #endif
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode\n");
+
+		return 0;
+	}
+
 	*has_error = false;
-	ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-			nb_cpls, NULL);
+	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
+		ret = dpaa_qdma_block_dequeue(fsl_qdma,
+				fsl_queue->block_id);
+	} else {
+		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
+				nb_cpls, NULL);
+	}
 #ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	if (!ret) {
 		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
@@ -727,7 +823,6 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 #endif
-	fsl_queue->stats.completed += ret;
 	if (last_idx) {
 		if (unlikely(!fsl_queue->complete))
 			*last_idx = fsl_queue->n_cq - 1;
@@ -743,7 +838,7 @@ dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
 	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -760,7 +855,7 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
@@ -771,7 +866,7 @@ static uint16_t
 dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 {
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue[vchan];
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
 	return fsl_queue->n_cq - fsl_queue->pending;
 }
@@ -827,37 +922,19 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	fsl_qdma->block_base =
 		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
 
-	fsl_qdma->status = rte_malloc("status queue",
-		sizeof(struct fsl_qdma_queue) * QDMA_BLOCKS, 0);
-	if (!fsl_qdma->status) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	fsl_qdma->queue = rte_malloc("cmd queue",
-		sizeof(struct fsl_qdma_queue) * fsl_qdma->n_queues, 0);
-	if (!fsl_qdma->queue) {
-		rte_free(fsl_qdma->status);
-		ret = -ENOMEM;
-		goto err;
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
 	k = 0;
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		fsl_qdma->status[i] =
-			fsl_qdma_prep_status_queue(fsl_qdma, i);
-		if (!fsl_qdma->status[i]) {
-			ret = -ENOMEM;
-			goto mem_free;
-		}
-		j = 0;
-		for (j = 0; j < QDMA_QUEUES; j++) {
-			fsl_qdma->queue[k] =
-				fsl_qdma_alloc_queue_resources(fsl_qdma, j, i);
-			if (!fsl_qdma->queue[k]) {
-				ret = -ENOMEM;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
 				goto mem_free;
-			}
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
 			k++;
 		}
 	}
@@ -865,24 +942,20 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n");
-		rte_free(fsl_qdma->status);
 		goto mem_free;
 	}
 
 	return 0;
 
 mem_free:
-	for (j = 0; j < k; j++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	for (j = 0; j < i; j++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[j]);
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	if (fsl_qdma->status)
-		rte_free(fsl_qdma->status);
-err:
-	if (fsl_qdma->queue)
-		rte_free(fsl_qdma->queue);
 	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	return ret;
@@ -929,16 +1002,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	uint32_t i;
+	uint32_t i, j, regs_size;
 
-	for (i = 0; i < fsl_qdma->num_blocks; i++)
-		fsl_qdma_free_status_queue(fsl_qdma->status[i]);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < fsl_qdma->num_blocks * QDMA_QUEUES; i++)
-		fsl_qdma_free_queue_resources(fsl_qdma->queue[i]);
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-	rte_free(fsl_qdma->queue);
-	rte_free(fsl_qdma->status);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
+
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 25954ef3a4..da2dbf36c9 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -107,13 +107,13 @@
 #define COMMAND_QUEUE_OVERFLOW		10
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		QDMA_QUEUE_SIZE
-#define QDMA_CCSR_BASE			0x8380000
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_QUEUE_CR_WM		32
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -140,7 +140,9 @@ struct fsl_qdma_format {
 			uint32_t addr_lo; /* low 32-bits of 40-bit address */
 			uint8_t addr_hi; /* high 8-bits of 40-bit address */
 			uint8_t __reserved1[2];
-			uint8_t cfg8b_w1; /* dd, queue */
+			uint8_t queue:3;
+			uint8_t rsv:3;
+			uint8_t dd:2;
 		};
 		uint64_t data;
 	};
@@ -182,6 +184,7 @@ struct fsl_qdma_queue {
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
+	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
 	struct fsl_qdma_format *cq;
@@ -189,6 +192,18 @@ struct fsl_qdma_queue {
 	uint8_t pending;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
+	void *engine;
+};
+
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_format *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
@@ -197,10 +212,13 @@ struct fsl_qdma_engine {
 	void *status_base;
 	void *block_base;
 	uint32_t n_queues;
-	struct fsl_qdma_queue **queue;
-	struct fsl_qdma_queue **status;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 22/30] dma/dpaa: add Scatter Gather support
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (20 preceding siblings ...)
  2024-07-22 16:39     ` [v3 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 23/30] dma/dpaa: block dequeue Gagandeep Singh
                       ` (7 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform an SG operation via the copy_sg callback of the DMA library
or via a burst request from the application.
Fall back to a simple copy operation when the burst number is 1.
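
A hedged usage sketch from the application side (not part of this patch;
the device id, vchan and buffer addresses are placeholders): a
multi-segment burst submitted through the generic rte_dma_copy_sg() API
is served by this driver's copy_sg callback, while a burst of a single
segment takes the simple-copy path.

	#include <rte_dmadev.h>

	static int
	submit_sg_copy(int16_t dev_id, uint16_t vchan,
		rte_iova_t src_base, rte_iova_t dst_base, uint32_t seg_len)
	{
		struct rte_dma_sge src[2], dst[2];
		int i;

		for (i = 0; i < 2; i++) {
			src[i].addr = src_base + i * seg_len;
			src[i].length = seg_len;
			dst[i].addr = dst_base + i * seg_len;
			dst[i].length = seg_len;
		}

		/* Two segments per side -> SG path; a single-segment burst
		 * would be handled as a simple copy instead.
		 */
		return rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2,
				RTE_DMA_OP_FLAG_SUBMIT);
	}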

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 856 ++++++++++++++++++++++++++---------
 drivers/dma/dpaa/dpaa_qdma.h | 184 +++++---
 2 files changed, 763 insertions(+), 277 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 825dead5cf..f1ad60d1f2 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,45 +4,31 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_pmd_dpaax_qdma.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static int s_data_validation;
+static int s_hw_err_check;
+static int s_sg_disable;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
-}
-
-static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
-{
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
-
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
-}
-
-static inline void
-qdma_ccdf_set_queue(struct fsl_qdma_format *ccdf,
-	uint8_t queue_idx)
-{
-	ccdf->queue = queue_idx;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
 	uint8_t *queue_idx)
 {
 	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
@@ -58,18 +44,6 @@ qdma_ccdf_get_queue(struct fsl_qdma_format *ccdf,
 	return false;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
-}
-
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
-{
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
-}
-
 static inline int ilog2(int x)
 {
 	int log = 0;
@@ -126,11 +100,11 @@ qdma_writel_be(uint32_t val, void *addr)
 }
 
 static void *
-dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -149,28 +123,46 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *ccdf;
+	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
-	struct fsl_qdma_format *head;
 	struct fsl_qdma_cmpd_ft *ft;
 	struct fsl_qdma_df *df;
 
-	head = queue->cmd_desc;
-
 	for (i = 0; i < queue->n_cq; i++) {
 		dma_addr_t phy_ft = 0, phy_df = 0;
 
-		queue->ft[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_cmpd_ft),
-				RTE_CACHE_LINE_SIZE, &phy_ft);
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
 		if (!queue->ft[i])
 			goto fail;
-
-		queue->df[i] =
-			dma_pool_alloc(sizeof(struct fsl_qdma_df),
-				RTE_CACHE_LINE_SIZE, &phy_df);
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
+			goto fail;
+		}
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+
+		queue->df[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_df),
+			RTE_CACHE_LINE_SIZE, &phy_df);
 		if (!queue->df[i]) {
 			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
 
@@ -182,31 +174,25 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		sdf = &df->sdf;
 		ddf = &df->ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(&ft->desc_buf, phy_df);
-
+		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(&ft->desc_buf,
-			sizeof(struct fsl_qdma_df));
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+		sdf->prefetch = 1;
 #endif
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
 
-		ccdf = queue->cmd_desc;
+		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
-		qdma_ccdf_set_format(ccdf, 0);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
 		if (!fsl_qdma->is_silent)
-			qdma_ccdf_set_ser(ccdf, 0);
-		qdma_ccdf_set_queue(ccdf, queue->queue_id);
-		queue->cmd_desc++;
+			ccdf->ser = 1;
+		ccdf->queue = queue->queue_id;
 	}
-	queue->cmd_desc = head;
 	queue->ci = 0;
 
 	return 0;
@@ -226,40 +212,107 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_queue *cmd_queue;
 	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
 
 	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
 	cmd_queue->engine = fsl_qdma;
 
-	queue_size = sizeof(struct fsl_qdma_format) * QDMA_QUEUE_SIZE;
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
 
-	cmd_queue->cq = dma_pool_alloc(queue_size,
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
 		queue_size, &cmd_queue->bus_addr);
-	if (!cmd_queue->cq)
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
 		return -ENOMEM;
-
-	memset(cmd_queue->cq, 0x0, queue_size);
+	}
 
 	cmd_queue->block_vir = fsl_qdma->block_base +
 		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
 	cmd_queue->queue_id = queue_id;
 	cmd_queue->block_id = block_id;
-	cmd_queue->pending = 0;
-	cmd_queue->cmd_desc = cmd_queue->cq;
-	cmd_queue->ft = rte_malloc("Compound Frame Table",
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	cmd_queue->df = rte_malloc("Descriptor Buffer",
+	sprintf(nm, "Descriptor Buf_%d_%d",
+		block_id, queue_id);
+	cmd_queue->df = rte_zmalloc(nm,
 			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
 	if (!cmd_queue->df) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->df);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		rte_free(cmd_queue->df);
+		return -ENOMEM;
+	}
+
 	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
 	return 0;
 }
@@ -270,6 +323,10 @@ fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
 }
 
 static void
@@ -289,9 +346,10 @@ fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	status = &fsl_qdma->stat_queues[block_id];
 	status->engine = fsl_qdma;
 
-	status_size = QDMA_STATUS_SIZE * sizeof(struct fsl_qdma_format);
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	status->cq = dma_pool_alloc(status_size,
+	status->cq = dma_pool_alloc(NULL, status_size,
 		status_size, &status->bus_addr);
 
 	if (!status->cq)
@@ -352,31 +410,116 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
+{
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+	if (likely(!s_data_validation))
+		return;
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRId64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!\r\n",
+					err_msg);
+				return;
+			}
+		}
+	}
+}
+
 static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls,
-	enum rte_dma_status_code *status)
+fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
 {
 	uint32_t reg;
-	int count = 0;
+	int count = 0, ret;
 	uint8_t *block = fsl_queue->block_vir;
+	uint16_t *dq_complete = NULL, drain_num = 0;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	while (count < nb_cpls) {
+	while (1) {
+		if (rte_ring_free_count(fsl_queue->complete_pool) <
+			(FSL_QDMA_SG_MAX_ENTRY * 2))
+			break;
 		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
 		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
+			break;
 
 		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-		fsl_queue->complete++;
-		if (unlikely(fsl_queue->complete >= fsl_queue->n_cq))
-			fsl_queue->complete = 0;
+		ret = rte_ring_dequeue(fsl_queue->complete_burst,
+			(void **)&dq_complete);
+		if (ret)
+			DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
+			(void **)desc, *dq_complete, NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
+
+		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
+			(void **)desc, (*dq_complete), NULL);
+		if (ret != (*dq_complete)) {
+			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
+				*dq_complete, ret);
+		}
+
+		drain_num += *dq_complete;
+		fsl_queue->complete_start =
+			(fsl_queue->complete_start + (*dq_complete)) &
+			(fsl_queue->pending_max - 1);
 		fsl_queue->stats.completed++;
 
 		count++;
 	}
-	return count;
+
+	return drain_num;
+}
+
+static int
+fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *status)
+{
+	int ret;
+	uint16_t dq_num = 0, i;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+
+	ret = fsl_qdma_queue_drain(fsl_queue);
+	if (ret < 0) {
+		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			ret);
+	}
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+		(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (status) {
+		for (i = 0; i < dq_num; i++)
+			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
+	}
+
+	return dq_num;
 }
 
 static int
@@ -473,87 +616,253 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue,
-	uint64_t flags, dma_addr_t dst,
-	dma_addr_t src, size_t len)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start;
+	int ret;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
 	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	struct fsl_qdma_sdf *sdf;
-	uint32_t cfg = 0;
-#endif
+	int ret;
 
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	uint32_t reg;
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0, drain;
+	uint32_t reg, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
+	check_num = 0;
+overflow_check:
+	if (unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE)) {
-		DPAA_QDMA_ERR("QDMA Engine is busy\n");
-		return -EBUSY;
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	drain = fsl_qdma_queue_drain(fsl_queue);
+	if (drain <= 0) {
+		drain_num++;
+		if (drain_num > 100) {
+			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			return -ENOSPC;
+		}
+		goto drain_again;
 	}
-#else
-	/* check whether critical watermark level reached,
-	 * below check is valid for only single queue per block
-	 */
-	if (fsl_qdma_queue_bd_in_hw(fsl_queue) >= QDMA_QUEUE_CR_WM) {
-		DPAA_QDMA_DEBUG("Queue is full, try dequeue first\n");
+	check_num++;
+	if (check_num > 10) {
+		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
 		return -ENOSPC;
 	}
+	goto overflow_check;
+
+	return -ENOSPC;
+}
+
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint8_t *block = fsl_queue->block_vir, i;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len = 0;
+	uint8_t num = fsl_queue->pending_num;
+	uint16_t start = fsl_queue->pending_start, idx;
+	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
 #endif
 
-	if (unlikely(fsl_queue->pending == fsl_queue->n_cq)) {
-		DPAA_QDMA_DEBUG("Queue is full, try dma submit first\n");
-		return -ENOSPC;
-	}
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = fsl_queue->df[fsl_queue->ci];
-	sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			FSL_QDMA_CMD_RWTTYPE_OFFSET);
+	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_PF);
+	sdf->prefetch = 1;
 #endif
-	if (len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSEN);
-		cfg |= rte_cpu_to_le_32(FSL_QDMA_CMD_SSS_STRIDE <<
-					FSL_QDMA_CFG_SSS_OFFSET |
-					FSL_QDMA_CMD_SSS_DISTANCE);
-		sdf->cfg = cfg;
-	} else
-		sdf->cfg = 0;
 #endif
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-	fsl_queue->ci++;
 
-	if (fsl_queue->ci == fsl_queue->n_cq)
-		fsl_queue->ci = 0;
+	if (num == 1) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+		if (fsl_queue->pending_desc[start].len >
+			FSL_QDMA_CMD_SSS_DISTANCE) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+		} else {
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+#endif
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (s_sg_disable) {
+		while (fsl_queue->pending_num > 0) {
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+			if (fsl_queue->pending_desc[start].len >
+				FSL_QDMA_CMD_SSS_DISTANCE) {
+				sdf->ssen = 1;
+				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
+			} else {
+				sdf->sss = 0;
+				sdf->ssd = 0;
+			}
+#endif
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
 
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->stats.submitted++;
+		return 0;
+	}
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+	}
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
+		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
 	} else {
-		fsl_queue->pending++;
+		sdf->sss = 0;
+		sdf->ssd = 0;
 	}
+#endif
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-	if (fsl_queue->ci)
-		return fsl_queue->ci - 1;
-	else
-		return fsl_queue->n_cq;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num = 0;
+
+	return 0;
 }
 
 static int
@@ -564,8 +873,9 @@ dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
 		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
-	dev_info->max_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
 
 	return 0;
@@ -651,16 +961,11 @@ dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-	void *block = fsl_queue->block_vir;
 
-	while (fsl_queue->pending) {
-		qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-			block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
@@ -670,9 +975,86 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	ret = fsl_qdma_enqueue_desc(fsl_queue, flags, dst, src, length);
+	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
+
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
@@ -683,7 +1065,7 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 {
 	struct fsl_qdma_status_queue *stat_queue;
 	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	uint16_t start, count = 0;
 	uint8_t qid;
 	int ret;
@@ -697,9 +1079,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
 			cmd_queue->stats.completed++;
-			cmd_queue->complete++;
-			if (unlikely(cmd_queue->complete == cmd_queue->n_cq))
-				cmd_queue->complete = 0;
 			start++;
 			if (unlikely(start == stat_queue->n_cq))
 				start = 0;
@@ -713,16 +1092,81 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:\n");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)\n");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)\n");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)\n");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)\n");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)\n");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)\n");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)\n");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:\n");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x\n",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x\n",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d\n",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
-	int intr;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -734,55 +1178,27 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, st);
+				nb_cpls, last_idx, st);
 	}
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-#endif
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
 			fsl_queue->stats.errors++;
-		}
-	}
-
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
 	}
 
 	return ret;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret;
+	int ret, err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
 	void *status = fsl_qdma->status_base;
-	int intr;
-#endif
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -796,39 +1212,17 @@ dpaa_qdma_dequeue(void *dev_private,
 				fsl_queue->block_id);
 	} else {
 		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, NULL);
+				nb_cpls, last_idx, NULL);
 	}
-#ifdef CONFIG_RTE_DMA_DPAA_ERR_CHK
-	if (!ret) {
-		intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-		if (intr) {
-			DPAA_QDMA_ERR("DMA transaction error! %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr);
-			intr = qdma_readl(status + FSL_QDMA_DECBR);
-			DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr);
-			qdma_writel_be(0xbf,
-				    status + FSL_QDMA_DEDR);
-			intr = qdma_readl(status + FSL_QDMA_DEDR);
-			*has_error = true;
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
 			fsl_queue->stats.errors++;
 		}
 	}
-#endif
-	if (last_idx) {
-		if (unlikely(!fsl_queue->complete))
-			*last_idx = fsl_queue->n_cq - 1;
-		else
-			*last_idx = fsl_queue->complete - 1;
-	}
 
 	return ret;
 }
@@ -868,7 +1262,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 	const struct fsl_qdma_engine *fsl_qdma = dev_private;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	return fsl_queue->n_cq - fsl_queue->pending;
+	return fsl_queue->pending_max - fsl_queue->pending_num;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
@@ -891,6 +1285,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
+		s_data_validation = 1;
+
+	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
+		s_hw_err_check = 1;
+
+	if (getenv("DPAA_QDMA_SG_DISABLE"))
+		s_sg_disable = 1;
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -981,6 +1384,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index da2dbf36c9..6c74d632e6 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -83,29 +83,15 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
-#define FSL_QDMA_CMD_PF			BIT(17)
-
-#define FSL_QDMA_CMD_SSEN		BIT(19)
 #define FSL_QDMA_CFG_SSS_OFFSET		12
 #define FSL_QDMA_CMD_SSS_STRIDE		128
 #define FSL_QDMA_CMD_SSS_DISTANCE	128
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
 #define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
@@ -132,64 +118,160 @@
 	(((fsl_qdma_engine)->block_offset) * (x))
 
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	uint32_t status; /* ser, status */
-	uint32_t cfg;	/* format, offset */
-	union {
-		struct {
-			uint32_t addr_lo; /* low 32-bits of 40-bit address */
-			uint8_t addr_hi; /* high 8-bits of 40-bit address */
-			uint8_t __reserved1[2];
-			uint8_t queue:3;
-			uint8_t rsv:3;
-			uint8_t dd:2;
-		};
-		uint64_t data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	uint32_t rev3;
-	uint32_t cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	uint32_t rev5;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	uint32_t rev1;
-	uint32_t cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	uint32_t rev3;
-	uint32_t cmd;
-};
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
 
 struct fsl_qdma_df {
 	struct fsl_qdma_sdf sdf;
 	struct fsl_qdma_ddf ddf;
 };
 
+#define FSL_QDMA_SG_MAX_ENTRY RTE_DPAAX_QDMA_JOB_SUBMIT_MAX
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
 struct fsl_qdma_cmpd_ft {
-	struct fsl_qdma_format desc_buf;
-	struct fsl_qdma_format desc_sbuf;
-	struct fsl_qdma_format desc_dbuf;
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+} __rte_packed;
+
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format *cmd_desc;
 	int used;
 	struct fsl_qdma_cmpd_ft **ft;
 	uint16_t ci;
-	uint16_t complete;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
 	uint16_t n_cq;
 	uint8_t block_id;
 	uint8_t queue_id;
 	uint8_t channel_id;
 	void *block_vir;
 	uint32_t le_cqmr;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
 	struct rte_dma_stats stats;
-	uint8_t pending;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint8_t pending_num;
+	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	struct fsl_qdma_df **df;
 	void *engine;
@@ -200,7 +282,7 @@ struct fsl_qdma_status_queue {
 	uint16_t complete;
 	uint8_t block_id;
 	void *block_vir;
-	struct fsl_qdma_format *cq;
+	struct fsl_qdma_comp_cmd_desc *cq;
 	struct rte_dma_stats stats;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread
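
A side note on the descriptor bookkeeping added in the patch above: the pending-descriptor array is indexed with a power-of-two mask rather than a modulo, which is correct only when the ring size is a power of two; the masking in the hunks above assumes exactly that. A minimal standalone sketch of the pattern, using toy names rather than the driver's own types, might look like:

	#include <stdint.h>

	struct toy_pending {
		uint16_t start; /* index of the first pending entry */
		uint16_t num;   /* number of pending entries */
		uint16_t size;  /* ring size, must be a power of two */
	};

	static inline uint16_t
	toy_pending_slot(const struct toy_pending *r, uint16_t off)
	{
		/* Equivalent to (start + off) % size only when size is 2^n */
		return (r->start + off) & (r->size - 1);
	}

	static inline void
	toy_pending_consume(struct toy_pending *r, uint16_t n)
	{
		r->start = (r->start + n) & (r->size - 1);
		r->num = (uint16_t)(r->num - n);
	}

The same masking appears in fsl_qdma_enqueue_desc(), dpaa_qdma_enqueue() and dpaa_qdma_copy_sg() in the diff above.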

* [v3 23/30] dma/dpaa: block dequeue
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (21 preceding siblings ...)
  2024-07-22 16:39     ` [v3 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
                       ` (6 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Perform a block-level dequeue of the status queue to identify which
command queue within the block has completed jobs.
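
As context for the change below, the idea can be reduced to scanning a per-block status ring and crediting the command queue each completion belongs to. A self-contained toy model, with illustrative names and data structures rather than the driver's own, is:

	#include <stdint.h>

	#define TOY_QUEUES 8
	#define TOY_STATUS_SIZE 16

	struct toy_block {
		uint8_t status_qid[TOY_STATUS_SIZE]; /* owning queue id per completion */
		uint16_t status_head;                /* next status entry to consume */
		uint16_t status_count;               /* valid status entries */
		uint64_t completed[TOY_QUEUES];      /* per-queue completion counters */
	};

	static uint16_t
	toy_block_dequeue(struct toy_block *blk)
	{
		uint16_t count = 0;

		while (blk->status_count > 0) {
			uint8_t qid = blk->status_qid[blk->status_head];

			blk->completed[qid]++; /* credit the queue that finished */
			blk->status_head = (blk->status_head + 1) % TOY_STATUS_SIZE;
			blk->status_count--;
			count++;
		}
		return count;
	}

The real implementation in the diff additionally moves the finished descriptors from the per-queue completion rings into the completion pool.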

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 252 ++++++++++++++++-------------------
 1 file changed, 116 insertions(+), 136 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index f1ad60d1f2..de5ecc7d0b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -442,86 +442,6 @@ fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
 	}
 }
 
-static int
-fsl_qdma_queue_drain(struct fsl_qdma_queue *fsl_queue)
-{
-	uint32_t reg;
-	int count = 0, ret;
-	uint8_t *block = fsl_queue->block_vir;
-	uint16_t *dq_complete = NULL, drain_num = 0;
-	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
-
-	while (1) {
-		if (rte_ring_free_count(fsl_queue->complete_pool) <
-			(FSL_QDMA_SG_MAX_ENTRY * 2))
-			break;
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			break;
-
-		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
-		ret = rte_ring_dequeue(fsl_queue->complete_burst,
-			(void **)&dq_complete);
-		if (ret)
-			DPAA_QDMA_ERR("DQ desc number failed!\n");
-
-		ret = rte_ring_dequeue_bulk(fsl_queue->complete_desc,
-			(void **)desc, *dq_complete, NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		fsl_qdma_data_validation(desc, *dq_complete, fsl_queue);
-
-		ret = rte_ring_enqueue_bulk(fsl_queue->complete_pool,
-			(void **)desc, (*dq_complete), NULL);
-		if (ret != (*dq_complete)) {
-			DPAA_QDMA_ERR("EQ %d descs to return queue failed!(%d)\n",
-				*dq_complete, ret);
-		}
-
-		drain_num += *dq_complete;
-		fsl_queue->complete_start =
-			(fsl_queue->complete_start + (*dq_complete)) &
-			(fsl_queue->pending_max - 1);
-		fsl_queue->stats.completed++;
-
-		count++;
-	}
-
-	return drain_num;
-}
-
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_queue *fsl_queue,
-	const uint16_t nb_cpls, uint16_t *last_idx,
-	enum rte_dma_status_code *status)
-{
-	int ret;
-	uint16_t dq_num = 0, i;
-	struct fsl_qdma_desc *desc_complete[nb_cpls];
-
-	ret = fsl_qdma_queue_drain(fsl_queue);
-	if (ret < 0) {
-		DPAA_QDMA_ERR("Drain TX%d/Q%d failed!(%d)",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			ret);
-	}
-
-	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
-		(void **)desc_complete, nb_cpls, NULL);
-	for (i = 0; i < dq_num; i++)
-		last_idx[i] = desc_complete[i]->flag;
-
-	if (status) {
-		for (i = 0; i < dq_num; i++)
-			status[i] = RTE_DMA_STATUS_SUCCESSFUL;
-	}
-
-	return dq_num;
-}
-
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -682,13 +602,90 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
+{
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete = NULL;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
+
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
+
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+			cmd_queue->stats.completed++;
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				DPAA_QDMA_ERR("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
-	int overflow = 0, drain;
-	uint32_t reg, check_num, drain_num;
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
 	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
@@ -711,11 +708,12 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	drain_num = 0;
 
 drain_again:
-	drain = fsl_qdma_queue_drain(fsl_queue);
-	if (drain <= 0) {
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
 		drain_num++;
 		if (drain_num > 100) {
-			DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
 			return -ENOSPC;
@@ -724,7 +722,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	}
 	check_num++;
 	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d/Q%d failed drain, %"PRIu64" bd in HW.",
+		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
@@ -1059,39 +1057,6 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
-static uint16_t
-dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
-	uint8_t block_id)
-{
-	struct fsl_qdma_status_queue *stat_queue;
-	struct fsl_qdma_queue *cmd_queue;
-	struct fsl_qdma_comp_cmd_desc *cq;
-	uint16_t start, count = 0;
-	uint8_t qid;
-	int ret;
-
-	stat_queue = &fsl_qdma->stat_queues[block_id];
-	cq = stat_queue->cq;
-	start = stat_queue->complete;
-
-	do {
-		ret = qdma_ccdf_get_queue(&cq[start], &qid);
-		if (ret == true) {
-			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
-			start++;
-			if (unlikely(start == stat_queue->n_cq))
-				start = 0;
-			count++;
-		} else {
-			break;
-		}
-	} while (1);
-	stat_queue->complete = start;
-
-	return count;
-}
-
 static int
 dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
 {
@@ -1164,22 +1129,32 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
 		return 0;
 	}
 
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, st);
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1187,7 +1162,7 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			fsl_queue->stats.errors++;
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static uint16_t
@@ -1196,9 +1171,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
-	int ret, err;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	void *status = fsl_qdma->status_base;
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
 	if (unlikely(fsl_qdma->is_silent)) {
 		DPAA_QDMA_WARN("Can't dq in silent mode\n");
@@ -1207,13 +1184,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	}
 
 	*has_error = false;
-	if (fsl_qdma->block_queues[fsl_queue->block_id] > 1) {
-		ret = dpaa_qdma_block_dequeue(fsl_qdma,
-				fsl_queue->block_id);
-	} else {
-		ret = fsl_qdma_queue_transfer_complete(fsl_queue,
-				nb_cpls, last_idx, NULL);
-	}
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)\n",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
 	if (s_hw_err_check) {
 		err = dpaa_qdma_err_handle(status +
 			FSL_QDMA_ERR_REG_STATUS_OFFSET);
@@ -1224,7 +1204,7 @@ dpaa_qdma_dequeue(void *dev_private,
 		}
 	}
 
-	return ret;
+	return dq_num;
 }
 
 static int
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 24/30] dma/dpaa: improve congestion handling
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (22 preceding siblings ...)
  2024-07-22 16:39     ` [v3 23/30] dma/dpaa: block dequeue Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
                       ` (5 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Congestion occurs frequently on low-speed devices (e.g. over PCIe).
Drain the command queue when congestion is detected so that DMA
transfers can keep making progress.
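
In outline, the enqueue path now retries a drain while the queue is congested and gives up only after a bounded number of attempts. A minimal sketch of that loop, under assumed callback semantics and with hypothetical names (not the driver's API), is:

	#include <errno.h>
	#include <stdint.h>

	typedef int (*toy_full_fn)(void *ctx);      /* nonzero while congested */
	typedef uint16_t (*toy_drain_fn)(void *ctx); /* completions reclaimed */

	static int
	toy_wait_for_room(void *ctx, toy_full_fn full, toy_drain_fn drain,
		int max_tries)
	{
		int tries = 0;

		while (full(ctx)) {
			if (++tries > max_tries)
				return -ENOSPC; /* no progress within the bound */
			(void)drain(ctx);   /* reclaim completed descriptors */
		}
		return 0; /* room available, caller performs the enqueue */
	}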

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 157 +++++++++++++++++++----------------
 1 file changed, 85 insertions(+), 72 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index de5ecc7d0b..eaa5f81f6d 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -535,73 +535,6 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
-{
-	uint16_t i, num = fsl_queue->pending_num, idx, start;
-	int ret;
-
-	num = is_burst ? fsl_queue->pending_num : 1;
-
-	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
-	ret = rte_ring_enqueue(fsl_queue->complete_burst,
-			&fsl_queue->desc_in_hw[fsl_queue->ci]);
-	if (ret) {
-		DPAA_QDMA_ERR("%s: Queue is full, try dequeue first",
-			__func__);
-		DPAA_QDMA_ERR("%s: submitted:%"PRIu64", completed:%"PRIu64"",
-			__func__, fsl_queue->stats.submitted,
-			fsl_queue->stats.completed);
-		return ret;
-	}
-	start = fsl_queue->pending_start;
-	for (i = 0; i < num; i++) {
-		idx = (start + i) & (fsl_queue->pending_max - 1);
-		ret = rte_ring_enqueue(fsl_queue->complete_desc,
-				&fsl_queue->pending_desc[idx]);
-		if (ret) {
-			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int
-fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
-	dma_addr_t dst, dma_addr_t src, size_t len)
-{
-	uint8_t *block = fsl_queue->block_vir;
-	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
-	struct fsl_qdma_cmpd_ft *ft;
-	int ret;
-
-	ft = fsl_queue->ft[fsl_queue->ci];
-	csgf_src = &ft->desc_sbuf;
-	csgf_dest = &ft->desc_dbuf;
-	qdma_desc_sge_addr_set64(csgf_src, src);
-	csgf_src->length = len;
-	csgf_src->extion = 0;
-	qdma_desc_sge_addr_set64(csgf_dest, dst);
-	csgf_dest->length = len;
-	csgf_dest->extion = 0;
-	/* This entry is the last entry. */
-	csgf_dest->final = 1;
-
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
-	if (ret)
-		return ret;
-	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
-
-	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
-		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
-	fsl_queue->stats.submitted++;
-
-	return 0;
-}
-
 static uint16_t
 dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	uint8_t block_id)
@@ -633,7 +566,6 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 		ret = qdma_ccdf_get_queue(&cq[start], &qid);
 		if (ret == true) {
 			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
-			cmd_queue->stats.completed++;
 
 			ret = rte_ring_dequeue(cmd_queue->complete_burst,
 				(void **)&dq_complete);
@@ -677,6 +609,87 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 	return count;
 }
 
+static int
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	int is_burst)
+{
+	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	int ret, dq_cnt;
+
+	num = is_burst ? fsl_queue->pending_num : 1;
+
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
+{
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	int ret;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
@@ -702,7 +715,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 	if (likely(!overflow))
 		return 0;
 
-	DPAA_QDMA_ERR("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
 		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
 	drain_num = 0;
@@ -712,7 +725,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		fsl_queue->block_id);
 	if (!blk_drain) {
 		drain_num++;
-		if (drain_num > 100) {
+		if (drain_num > 1000) {
 			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
 				fsl_queue->block_id, fsl_queue->queue_id,
 				st->submitted - st->completed);
@@ -721,8 +734,8 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 		goto drain_again;
 	}
 	check_num++;
-	if (check_num > 10) {
-		DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+	if (check_num > 1000) {
+		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
 			fsl_queue->block_id, fsl_queue->queue_id,
 			st->submitted - st->completed);
 		return -ENOSPC;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 25/30] dma/dpaa: disable SG descriptor as default
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (23 preceding siblings ...)
  2024-07-22 16:39     ` [v3 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
                       ` (4 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Use burst operation for SG copy by default (SG descriptors disabled)
until the SG issue is fixed.
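
The driver reads environment variables at init time to tune such toggles (see the getenv() handling earlier in the file); as a generic illustration of reading an integer toggle from the environment, with a hypothetical helper and variable name, not necessarily the driver's:

	#include <stdlib.h>

	/* Generic pattern for overriding a compile-time default from the
	 * environment at init time; the variable name is up to the caller.
	 */
	static int
	toy_env_toggle(const char *name, int default_value)
	{
		const char *penv = getenv(name);

		return penv ? atoi(penv) : default_value;
	}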

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index eaa5f81f6d..8492d0de5b 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,7 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable;
+static int s_sg_disable = 1;
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -335,7 +335,6 @@ fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
 	rte_free(queue->cq);
 }
 
-
 static int
 fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
 	uint32_t block_id)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 26/30] dma/dpaa: improve ERRATA workaround solution
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (24 preceding siblings ...)
  2024-07-22 16:39     ` [v3 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
                       ` (3 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Fix the ERRATA 050757/050265 workaround, which was not effective in
burst mode.

The SDF/DDF is referenced by the first entry of the compound frame
table, so move the descriptor formats (DF) into the compound frame
table itself; the same layout then works for both single copy and
SG/burst copy.

Fix the SG issue caused by memset() clearing the physical addresses of
the SG entries in the compound frame table.
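
For reference, the stride programming the workaround relies on reduces to a simple length check: if the copy length exceeds the errata limit, source striding is enabled with both stride size and distance set to that limit, otherwise striding is left off. A self-contained sketch with a toy structure (the field names mirror the SDF layout in the diff, the struct itself is illustrative) is:

	#include <stdint.h>

	#define TOY_ERR050757_LEN 128 /* errata limit, as FSL_QDMA_CMD_SS_ERR050757_LEN */

	struct toy_sdf {
		uint32_t ssen; /* source stride enable */
		uint32_t sss;  /* source stride size */
		uint32_t ssd;  /* source stride distance */
	};

	static void
	toy_sdf_err050757(struct toy_sdf *sdf, uint32_t total_len)
	{
		if (total_len > TOY_ERR050757_LEN) {
			sdf->ssen = 1;
			sdf->sss = TOY_ERR050757_LEN;
			sdf->ssd = TOY_ERR050757_LEN;
		} else {
			sdf->ssen = 0;
			sdf->sss = 0;
			sdf->ssd = 0;
		}
	}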

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 215 +++++++++++++++++------------------
 drivers/dma/dpaa/dpaa_qdma.h |   7 +-
 2 files changed, 107 insertions(+), 115 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8492d0de5b..5d91ad2d70 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -11,7 +11,10 @@
 
 static int s_data_validation;
 static int s_hw_err_check;
-static int s_sg_disable = 1;
+static int s_sg_enable = 1;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+static int s_pci_read = 1;
+#endif
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -126,10 +129,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	struct fsl_qdma_comp_cmd_desc *ccdf;
 	uint16_t i, j;
 	struct fsl_qdma_cmpd_ft *ft;
-	struct fsl_qdma_df *df;
 
 	for (i = 0; i < queue->n_cq; i++) {
-		dma_addr_t phy_ft = 0, phy_df = 0;
+		dma_addr_t phy_ft = 0;
 
 		queue->ft[i] = dma_pool_alloc(NULL,
 			sizeof(struct fsl_qdma_cmpd_ft),
@@ -156,25 +158,14 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
 		queue->ft[i]->phy_dsge = phy_ft +
 			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
-
-		queue->df[i] = dma_pool_alloc(NULL,
-			sizeof(struct fsl_qdma_df),
-			RTE_CACHE_LINE_SIZE, &phy_df);
-		if (!queue->df[i]) {
-			rte_free(queue->ft[i]);
-			queue->ft[i] = NULL;
-			goto fail;
-		}
-
-		memset(queue->ft[i], 0, sizeof(struct fsl_qdma_cmpd_ft));
-		memset(queue->df[i], 0, sizeof(struct fsl_qdma_df));
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
 
 		ft = queue->ft[i];
-		df = queue->df[i];
-		sdf = &df->sdf;
-		ddf = &df->ddf;
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_sge_addr_set64(&ft->desc_buf, phy_df);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
 		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
 
@@ -198,10 +189,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 	return 0;
 
 fail:
-	for (j = 0; j < i; j++) {
+	for (j = 0; j < i; j++)
 		rte_free(queue->ft[j]);
-		rte_free(queue->df[j]);
-	}
 
 	return -ENOMEM;
 }
@@ -247,23 +236,12 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
 	}
-	sprintf(nm, "Descriptor Buf_%d_%d",
-		block_id, queue_id);
-	cmd_queue->df = rte_zmalloc(nm,
-			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
-	if (!cmd_queue->df) {
-		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->ft);
-		rte_free(cmd_queue->cq);
-		return -ENOMEM;
-	}
 	sprintf(nm, "Pending_desc_%d_%d",
 		block_id, queue_id);
 	cmd_queue->pending_desc = rte_zmalloc(nm,
 		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
 	if (!cmd_queue->pending_desc) {
 		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
-		rte_free(cmd_queue->df);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
 		return -ENOMEM;
@@ -278,7 +256,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-desc_ring_%d_%d",
@@ -292,7 +269,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 	sprintf(nm, "complete-pool-desc_ring_%d_%d",
@@ -307,7 +283,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 		rte_free(cmd_queue->pending_desc);
 		rte_free(cmd_queue->ft);
 		rte_free(cmd_queue->cq);
-		rte_free(cmd_queue->df);
 		return -ENOMEM;
 	}
 
@@ -320,7 +295,6 @@ fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
 static void
 fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
 {
-	rte_free(queue->df);
 	rte_free(queue->ft);
 	rte_free(queue->cq);
 	rte_free(queue->pending_desc);
@@ -664,8 +638,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ft = fsl_queue->ft[fsl_queue->ci];
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -745,7 +741,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
 	uint8_t *block = fsl_queue->block_vir, i;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
@@ -758,74 +754,10 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_sdf *sdf;
 #endif
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	sdf = &fsl_queue->df[fsl_queue->ci]->sdf;
-	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
-	sdf->prefetch = 1;
-#endif
-#endif
-
-	if (num == 1) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-		if (fsl_queue->pending_desc[start].len >
-			FSL_QDMA_CMD_SSS_DISTANCE) {
-			sdf->ssen = 1;
-			sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-			sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-		} else {
-			sdf->sss = 0;
-			sdf->ssd = 0;
-		}
-#endif
-		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-			fsl_queue->pending_desc[start].dst,
-			fsl_queue->pending_desc[start].src,
-			fsl_queue->pending_desc[start].len);
-		if (!ret) {
-			fsl_queue->pending_start =
-				(start + 1) & (fsl_queue->pending_max - 1);
-			fsl_queue->pending_num = 0;
-		}
-		return ret;
-	} else if (s_sg_disable) {
-		while (fsl_queue->pending_num > 0) {
-#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-			if (fsl_queue->pending_desc[start].len >
-				FSL_QDMA_CMD_SSS_DISTANCE) {
-				sdf->ssen = 1;
-				sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-				sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-			} else {
-				sdf->sss = 0;
-				sdf->ssd = 0;
-			}
-#endif
-			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
-				fsl_queue->pending_desc[start].dst,
-				fsl_queue->pending_desc[start].src,
-				fsl_queue->pending_desc[start].len);
-			if (!ret) {
-				start = (start + 1) &
-					(fsl_queue->pending_max - 1);
-				fsl_queue->pending_start = start;
-				fsl_queue->pending_num--;
-			} else {
-				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
-					ret);
-				return -EIO;
-			}
-		}
 
-		return 0;
-	}
 	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
 	csgf_src->extion = 1;
 	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
@@ -849,13 +781,21 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
-	if (total_len > FSL_QDMA_CMD_SSS_DISTANCE) {
-		sdf->ssen = 1;
-		sdf->sss = FSL_QDMA_CMD_SSS_STRIDE;
-		sdf->ssd = FSL_QDMA_CMD_SSS_DISTANCE;
-	} else {
-		sdf->sss = 0;
-		sdf->ssd = 0;
+	if (s_pci_read) {
+		sdf = &ft->df.sdf;
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
+		if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+			sdf->ssen = 1;
+			sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+			sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		} else {
+			sdf->ssen = 0;
+			sdf->sss = 0;
+			sdf->ssd = 0;
+		}
 	}
 #endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
@@ -875,6 +815,51 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
+{
+	uint16_t start = fsl_queue->pending_start;
+	int ret;
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	} else if (!s_sg_enable) {
+		while (fsl_queue->pending_num > 0) {
+			ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+				fsl_queue->pending_desc[start].dst,
+				fsl_queue->pending_desc[start].src,
+				fsl_queue->pending_desc[start].len);
+			if (!ret) {
+				start = (start + 1) &
+					(fsl_queue->pending_max - 1);
+				fsl_queue->pending_start = start;
+				fsl_queue->pending_num--;
+			} else {
+				DPAA_QDMA_ERR("Eq pending desc failed(%d)",
+					ret);
+				return -EIO;
+			}
+		}
+
+		return 0;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
+
 static int
 dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
 	__rte_unused uint32_t info_sz)
@@ -1276,6 +1261,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int regs_size;
 	int ret;
 	uint32_t i, j, k;
+	char *penv;
 
 	if (getenv("DPAA_QDMA_DATA_VALIDATION"))
 		s_data_validation = 1;
@@ -1283,8 +1269,15 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (getenv("DPAA_QDMA_HW_ERR_CHECK"))
 		s_hw_err_check = 1;
 
-	if (getenv("DPAA_QDMA_SG_DISABLE"))
-		s_sg_disable = 1;
+	penv = getenv("DPAA_QDMA_SG_ENABLE");
+	if (penv)
+		s_sg_enable = atoi(penv);
+
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	penv = getenv("DPAA_QDMA_PCI_READ");
+	if (penv)
+		s_pci_read = atoi(penv);
+#endif
 
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 6c74d632e6..f2aa6fdd34 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -88,9 +88,7 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CFG_SSS_OFFSET		12
-#define FSL_QDMA_CMD_SSS_STRIDE		128
-#define FSL_QDMA_CMD_SSS_DISTANCE	128
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
@@ -192,8 +190,10 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t cache_align[2];
 	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
 	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
 	uint64_t phy_ssge;
 	uint64_t phy_dsge;
+	uint64_t phy_df;
 } __rte_packed;
 
 #define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
@@ -273,7 +273,6 @@ struct fsl_qdma_queue {
 	uint8_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
-	struct fsl_qdma_df **df;
 	void *engine;
 };
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 27/30] dma/dpaa: improve silent mode support
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (25 preceding siblings ...)
  2024-07-22 16:39     ` [v3 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
                       ` (2 subsequent siblings)
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

In silent mode, don't save the enqueue (eq) context; only check the HW
status to identify whether the queue is full.
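
A minimal sketch of the resulting enqueue path (hw_queue_full() and
save_eq_context() below are hypothetical stand-ins for the driver's helpers;
the real change is in the diff):

#include <errno.h>
#include <stdbool.h>

struct qdma_queue;                         /* opaque stand-in for the queue */
bool hw_queue_full(struct qdma_queue *q);  /* hypothetical: reads BCQSR XOFF */
int save_eq_context(struct qdma_queue *q); /* hypothetical: ring bookkeeping */

/* In silent mode software never dequeues completions, so skip the context
 * save and only poll the HW queue-full status to throttle the producer.
 */
static int
enqueue_prep(struct qdma_queue *q, bool is_silent)
{
	if (is_silent)
		return hw_queue_full(q) ? -ENOSPC : 0;

	return save_eq_context(q);
}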

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 5d91ad2d70..8f5b6c6ea5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -586,9 +586,13 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	int is_burst)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
+
 	num = is_burst ? fsl_queue->pending_num : 1;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
@@ -697,7 +701,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (unlikely(s_hw_err_check)) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -707,8 +711,14 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
 	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num < 1000)
+			goto overflow_check;
+		return -ENOSPC;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 28/30] dma/dpaa: support multiple SG copies
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (26 preceding siblings ...)
  2024-07-22 16:39     ` [v3 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
  2024-07-22 16:39     ` [v3 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Split a burst of copies into multiple SG copies when the number of jobs
in the burst exceeds the maximum number of SG entries.
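
The chunking boils down to a loop like this self-contained sketch
(SG_MAX_ENTRY and submit() are stand-ins for FSL_QDMA_SG_MAX_ENTRY and the
driver's SG descriptor build):

#include <stdint.h>

#define SG_MAX_ENTRY 64 /* stand-in for FSL_QDMA_SG_MAX_ENTRY */

/* Submit 'pending' jobs starting at 'start' in a power-of-two ring of
 * 'ring_size' entries, using at most SG_MAX_ENTRY jobs per SG descriptor.
 */
static int
split_burst(uint16_t start, uint16_t pending, uint16_t ring_size,
	int (*submit)(uint16_t start, uint16_t num))
{
	while (pending > 0) {
		uint16_t num = pending > SG_MAX_ENTRY ? SG_MAX_ENTRY : pending;
		int ret = submit(start, num);

		if (ret)
			return ret;
		start = (start + num) & (ring_size - 1);
		pending -= num;
	}

	return 0;
}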

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 180 +++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |   2 +-
 2 files changed, 98 insertions(+), 84 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8f5b6c6ea5..383142fc75 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -584,17 +584,15 @@ dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
 
 static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
-	int is_burst)
+	uint16_t num)
 {
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-	uint16_t i, num = fsl_queue->pending_num, idx, start, dq;
+	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
 	if (fsl_qdma->is_silent)
 		return 0;
 
-	num = is_burst ? fsl_queue->pending_num : 1;
-
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
 	ret = rte_ring_enqueue(fsl_queue->complete_burst,
@@ -634,6 +632,69 @@ fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint32_t reg;
+	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
+
+	if (likely(!overflow)) {
+		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
+	}
+	goto overflow_check;
+
+	return 0;
+}
+
 static int
 fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	dma_addr_t dst, dma_addr_t src, size_t len)
@@ -646,6 +707,10 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
@@ -677,7 +742,7 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	/* This entry is the last entry. */
 	csgf_dest->final = 1;
 
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 0);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
 	if (ret)
 		return ret;
 	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
@@ -689,81 +754,30 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	return 0;
 }
 
-static int
-fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
-{
-	int overflow = 0;
-	uint32_t reg;
-	uint16_t blk_drain, check_num, drain_num;
-	uint8_t *block = fsl_queue->block_vir;
-	const struct rte_dma_stats *st = &fsl_queue->stats;
-	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
-
-	check_num = 0;
-overflow_check:
-	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
-		reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
-		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
-			1 : 0;
-	} else {
-		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
-			QDMA_QUEUE_CR_WM) ? 1 : 0;
-	}
-
-	if (likely(!overflow)) {
-		return 0;
-	} else if (fsl_qdma->is_silent) {
-		check_num++;
-		if (check_num < 1000)
-			goto overflow_check;
-		return -ENOSPC;
-	}
-
-	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
-		fsl_queue->block_id, fsl_queue->queue_id,
-		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
-	drain_num = 0;
-
-drain_again:
-	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
-		fsl_queue->block_id);
-	if (!blk_drain) {
-		drain_num++;
-		if (drain_num > 1000) {
-			DPAA_QDMA_ERR("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
-				fsl_queue->block_id, fsl_queue->queue_id,
-				st->submitted - st->completed);
-			return -ENOSPC;
-		}
-		goto drain_again;
-	}
-	check_num++;
-	if (check_num > 1000) {
-		DPAA_QDMA_ERR("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
-			fsl_queue->block_id, fsl_queue->queue_id,
-			st->submitted - st->completed);
-		return -ENOSPC;
-	}
-	goto overflow_check;
-
-	return -ENOSPC;
-}
-
 static int
 fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-	uint8_t *block = fsl_queue->block_vir, i;
+	uint8_t *block = fsl_queue->block_vir;
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
-	uint32_t total_len = 0;
-	uint8_t num = fsl_queue->pending_num;
-	uint16_t start = fsl_queue->pending_start, idx;
+	uint32_t total_len;
+	uint16_t start, idx, num, i;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
 #endif
 
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
@@ -808,7 +822,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		}
 	}
 #endif
-	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
 
@@ -820,7 +834,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 
 	fsl_queue->pending_start =
 		(start + num) & (fsl_queue->pending_max - 1);
-	fsl_queue->pending_num = 0;
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
@@ -831,10 +847,6 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
-
 	if (fsl_queue->pending_num == 1) {
 		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
 			fsl_queue->pending_desc[start].dst,
@@ -871,17 +883,19 @@ fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	__rte_unused uint32_t info_sz)
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY;
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
 	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = fsl_qdma->n_queues;
 	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
 	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
 	return 0;
 }
@@ -985,9 +999,9 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	uint16_t idx;
 	int ret;
 
-	if (pending >= FSL_QDMA_SG_MAX_ENTRY) {
-		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
-			vchan);
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
 		return -ENOSPC;
 	}
 	idx = (start + pending) & (fsl_queue->pending_max - 1);
@@ -1253,7 +1267,7 @@ dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index f2aa6fdd34..b5d76776cb 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -270,7 +270,7 @@ struct fsl_qdma_queue {
 	struct fsl_qdma_desc *pending_desc;
 	uint16_t pending_max;
 	uint16_t pending_start;
-	uint8_t pending_num;
+	uint16_t pending_num;
 	uint16_t complete_start;
 	dma_addr_t bus_addr;
 	void *engine;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 29/30] dma/dpaa: support max SG entry size
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (27 preceding siblings ...)
  2024-07-22 16:39     ` [v3 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  2024-07-22 16:39     ` [v3 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

SG transactions do not help performance for large transaction sizes.
In the SG copy path, start a single (non-SG) transaction for any entry
whose size exceeds the maximum SG entry size.

The default maximum SG entry size is 2000 bytes, based on mem-to-mem
measurements; users can tune it to match their own experiments:
export DPAA_QDMA_SG_MAX_ENTRY_SIZE=xxx
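
A self-contained sketch of the decision (names are illustrative; the
driver's real check is in fsl_qdma_enqueue_desc_sg()):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Read the threshold once, defaulting to 2000 bytes. */
static uint32_t
sg_max_entry_size(void)
{
	const char *penv = getenv("DPAA_QDMA_SG_MAX_ENTRY_SIZE");

	return penv ? (uint32_t)atoi(penv) : 2000;
}

/* Entries above the threshold are issued as plain single copies instead
 * of being packed into the SG table.
 */
static bool
use_single_copy(uint32_t entry_len)
{
	return entry_len > sg_max_entry_size();
}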

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 41 ++++++++++++++++++++++++++++++++----
 1 file changed, 37 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 383142fc75..e3f2777b40 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -12,6 +12,8 @@
 static int s_data_validation;
 static int s_hw_err_check;
 static int s_sg_enable = 1;
+static uint32_t s_sg_max_entry_sz = 2000;
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 static int s_pci_read = 1;
 #endif
@@ -761,7 +763,7 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	uint32_t total_len;
-	uint16_t start, idx, num, i;
+	uint16_t start, idx, num, i, next_idx;
 	int ret;
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	struct fsl_qdma_sdf *sdf;
@@ -770,13 +772,31 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 eq_sg:
 	total_len = 0;
 	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
+
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
 	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
 		num = FSL_QDMA_SG_MAX_ENTRY;
 	else
 		num = fsl_queue->pending_num;
-	ret = fsl_qdma_enqueue_overflow(fsl_queue);
-	if (unlikely(ret))
-		return ret;
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 	csgf_src = &ft->desc_sbuf;
@@ -799,7 +819,16 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
 		ft->desc_dsge[i].final = 0;
 		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
 	}
+
 	ft->desc_ssge[num - 1].final = 1;
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
@@ -1297,6 +1326,10 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	if (penv)
 		s_sg_enable = atoi(penv);
 
+	penv = getenv("DPAA_QDMA_SG_MAX_ENTRY_SIZE");
+	if (penv)
+		s_sg_max_entry_sz = atoi(penv);
+
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	penv = getenv("DPAA_QDMA_PCI_READ");
 	if (penv)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v3 30/30] bus/dpaa: add port bmi stats
  2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
                       ` (28 preceding siblings ...)
  2024-07-22 16:39     ` [v3 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
@ 2024-07-22 16:39     ` Gagandeep Singh
  29 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-07-22 16:39 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended statistics.
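
Once exposed, the new BMI counters show up as regular xstats; a minimal
reader (sketch, assumes a started dpaa port and that 64 entries are enough)
could look like:

#include <inttypes.h>
#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name names[64];
	struct rte_eth_xstat stats[64];
	int n_names, n_stats, i;

	n_names = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
	n_stats = rte_eth_xstats_get(port_id, stats, RTE_DIM(stats));
	if (n_names < 0 || n_stats < 0 ||
	    n_names > (int)RTE_DIM(names) || n_stats > (int)RTE_DIM(stats))
		return; /* error, or the arrays are too small for this port */

	for (i = 0; i < n_stats && i < n_names; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, stats[i].value);
}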

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index 3a6dd555a7..60681068ea 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 060b8c678f..4d9a4c7e6d 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discrad_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard ",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_diallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values_copy);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index b6c61b8b6b..261a5a3ca7 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v3 02/30] dma/dpaa2: support multiple HW queues
  2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
@ 2024-07-22 20:19       ` Stephen Hemminger
  2024-10-07 20:51       ` Stephen Hemminger
  1 sibling, 0 replies; 165+ messages in thread
From: Stephen Hemminger @ 2024-07-22 20:19 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Hemant Agrawal, Jun Yang

On Mon, 22 Jul 2024 22:09:02 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> index 5954b552b5..945ba71e4a 100644
> --- a/drivers/dma/dpaa2/dpaa2_qdma.c
> +++ b/drivers/dma/dpaa2/dpaa2_qdma.c
> @@ -478,9 +478,9 @@ dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
>  
>  static inline uint16_t
>  dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
> -			     const struct qbman_fd *fd,
> -			     struct rte_dpaa2_qdma_job **job,
> -			     uint16_t *nb_jobs)
> +	const struct qbman_fd *fd,
> +	struct rte_dpaa2_qdma_job **job,
> +	uint16_t *nb_jobs)

Why are you changing this? The DPDK style is either to line up the args with the paren
or use two tabs here. Changing indentation leads to lots of extra changes which makes
it hard to review. Please, just leave the existing indentation alone or make it
a separate patch.
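
For reference, the two accepted forms look roughly like this (made-up
prototypes, not from the patch):

struct foo;
struct bar;

/* 1. Continuation lines aligned with the opening parenthesis. */
static int example_fn(struct foo *dev, const struct bar *cfg,
		      unsigned int count);

/* 2. Continuation lines indented by two extra tabs. */
static int example_fn2(struct foo *dev, const struct bar *cfg,
		unsigned int count);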

^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v3 05/30] dma/dpaa2: add sanity check for SG entry
  2024-07-22 16:39     ` [v3 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
@ 2024-07-22 20:21       ` Stephen Hemminger
  0 siblings, 0 replies; 165+ messages in thread
From: Stephen Hemminger @ 2024-07-22 20:21 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Hemant Agrawal, Jun Yang

On Mon, 22 Jul 2024 22:09:05 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> From: Jun Yang <jun.yang@nxp.com>
> 
> Make sure the SG entry number doesn't overflow.
> 
> Signed-off-by: Jun Yang <jun.yang@nxp.com>
> ---

Please do not use the term 'sanity check'.
It is on the Inclusive Naming word list as a Tier 2: strongly consider replacing.
https://inclusivenaming.org/word-lists/tier-2/sanity-check/

^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v3 02/30] dma/dpaa2: support multiple HW queues
  2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
  2024-07-22 20:19       ` Stephen Hemminger
@ 2024-10-07 20:51       ` Stephen Hemminger
  1 sibling, 0 replies; 165+ messages in thread
From: Stephen Hemminger @ 2024-10-07 20:51 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Hemant Agrawal, Jun Yang

On Mon, 22 Jul 2024 22:09:02 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> From: Jun Yang <jun.yang@nxp.com>
> 
> Initialize and Configure queues of dma device according to hw queues
> supported from mc bus.
> Because multiple queues per device are supported, the virt queue
> implementation is dropped.
> 
> Signed-off-by: Jun Yang <jun.yang@nxp.com>

Because of later changes to main branch, this series no longer applies
cleanly. Please rebase and retest.

^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 00/15] NXP DMA driver fixes and Enhancements
  2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-08  7:22       ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                           ` (14 more replies)
  0 siblings, 15 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev

V4 changes:
* rebased series to latest commit and patches reduced.

V3 changes:
* fix 32 bit compilation issue

V2 changes:
* fix compilation issue on ubuntu 22.04

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (14):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: refactor driver code
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: support burst capacity API
  dma/dpaa: add silent mode support
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: add Scatter Gather support
  dma/dpaa: add DMA error checks

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    9 +
 doc/guides/dmadevs/dpaa2.rst                  |   10 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1593 +++++++----
 drivers/dma/dpaa/dpaa_qdma.h                  |  292 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2446 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   83 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 29 files changed, 2899 insertions(+), 2298 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 01/15] dma/dpaa2: configure route by port by PCIe port param
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08  7:22         ` [v4 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
                           ` (13 subsequent siblings)
  14 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
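
With this mapping, an application selects route-by-port through the generic
dmadev API alone; a minimal sketch (placeholder values) of the destination
side could be:

#include <rte_dmadev.h>

static int
setup_pcie_vchan(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 0,	/* -> rbp.dportid */
				.pfid = 0,	/* -> rbp.dpfid */
				.vfen = 1,	/* -> rbp.dvfa */
				.vfid = 2,	/* -> rbp.dvfid */
			},
		},
	};

	return rte_dma_vchan_setup(dev_id, vchan, &conf);
}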

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5780e49297..5d4749eae3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.dvfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index fc16517f7a..43e8b2d5c5 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 02/15] dma/dpaa2: refactor driver code
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08  7:22         ` [v4 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                           ` (12 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

refactor the driver code with changes in:
- multiple HW queues
- DMA single copy and SG copy
- silent mode (a configuration sketch follows below)
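
Silent mode, for example, is selected through the standard dmadev
configuration; a minimal sketch (placeholder values):

#include <stdbool.h>
#include <rte_dmadev.h>

static int
configure_silent(int16_t dev_id)
{
	struct rte_dma_conf conf = {
		.nb_vchans = 1,
		.enable_silent = true,
	};

	return rte_dma_configure(dev_id, &conf);
}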

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |    8 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 2208 ++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  148 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  130 +-
 drivers/dma/dpaa2/version.map          |   13 -
 5 files changed, 1158 insertions(+), 1349 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index d2c26231e2..079337e61c 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -73,3 +73,11 @@ Platform Requirement
 
 DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
 ``Supported DPAA2 SoCs``.
+
+Device Arguments
+----------------
+1. Use dev arg option ``fle_pre_populate=1`` to pre-populate all
+   DMA descriptors with pre-initialized values.
+   usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
+2. Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
+   usage example: ``fslmc:dpdmai.1,desc_debug=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5d4749eae3..6c77dc32c4 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -14,220 +14,378 @@
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
-#define DPAA2_QDMA_PREFETCH "prefetch"
+#define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
+#define DPAA2_QDMA_DESC_DEBUG "desc_debug"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
-static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+static struct fsl_mc_io s_proc_mc_reg;
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (strcmp(value, "1"))
+		return -1;
 
-	fd->simple_pci.len_sl = len;
+	return 0;
+}
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+static int
+dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	if (!devargs)
+		return 0;
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
 
-	return 0;
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
 }
 
 static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if ((ring->tail + nb) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem, nb * sizeof(uint16_t));
+		ring->tail += nb;
+	} else {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem,
+			(DPAA2_QDMA_MAX_DESC - ring->tail) *
+			sizeof(uint16_t));
+		rte_memcpy(&ring->cntx_idx_ring[0],
+			&elem[DPAA2_QDMA_MAX_DESC - ring->tail],
+			(nb - DPAA2_QDMA_MAX_DESC + ring->tail) *
+			sizeof(uint16_t));
+		ring->tail = (ring->tail + nb) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
 
-	return 0;
+	if (free_space)
+		*free_space = ring->free_space;
+
+	return nb;
 }
 
-static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
 
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+	if (!ret)
+		return 0;
+
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
+
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
+}
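
dpaa2_qdma_multi_eq() keeps calling qbman_swp_enqueue_multiple() until the burst is accepted by the enqueue command ring, and any FDs left over (num_tx < fd_idx) are shifted to the front of qdma_vq->fd[] so the next submit retries them. A simplified sketch of that compaction step, with a stand-in fd type instead of struct qbman_fd:

#include <stdint.h>
#include <string.h>

struct fd { uint64_t words[4]; }; /* stand-in for struct qbman_fd */

/* Move the unsent tail of fd[] to the front; returns how many remain. */
static uint16_t compact_unsent(struct fd *fd, uint16_t total, uint16_t sent)
{
	uint16_t left = total - sent;

	if (left)
		memmove(fd, &fd[sent], left * sizeof(*fd));
	return left;
}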
+
+static void
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
+{
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
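
The pre-populate helper fills the static part of the long-format compound frame once: one frame-list entry points at the source/destination descriptor pair, the next two describe the data buffers, and only the destination entry carries the FIN bit. The index constants come from dpaa2_qdma.h; the values below are illustrative only:

/* Illustrative layout; the real values are defined in dpaa2_qdma.h. */
enum {
	DPAA2_QDMA_SDD_FLE = 0,	/* FLE pointing at the SDD pair */
	DPAA2_QDMA_SRC_FLE = 1,	/* source buffer address/length */
	DPAA2_QDMA_DST_FLE = 2,	/* destination buffer, carries FIN */
	DPAA2_QDMA_MAX_FLE = 3,
};

enum {
	DPAA2_QDMA_SRC_SDD = 0,	/* read command (rdtype/rbp) */
	DPAA2_QDMA_DST_SDD = 1,	/* write command (wrttype/rbp) */
	DPAA2_QDMA_MAX_SDD = 2,
};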
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
+{
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
 {
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i;
+	uint32_t total_len = 0;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = src[i].length;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
+
+	total_len += dst[i].length;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
+}
+
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
+{
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
 {
 	uint16_t i;
 	uint32_t total_len = 0;
-	uint64_t iova;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +393,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +403,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += src[i].length;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,325 +416,452 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
+
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
+	}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
 	}
 
-	return 0;
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected, i;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
+		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
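
dpaa2_qdma_copy_sg() maps one rte_dma_copy_sg() call onto a single compound FD whose scatter-gather tables are built from the src/dst arrays; per-job completion indexes are recovered from the flags word via DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(). A minimal caller-side sketch through the generic dmadev API, leaving the driver-specific index flags aside and assuming the vchan is already set up:

#include <rte_dmadev.h>

/* One-entry SG copy; source and destination lengths must match per entry. */
static int submit_sg_copy(int16_t dev_id, uint16_t vchan,
	rte_iova_t src_iova, rte_iova_t dst_iova, uint32_t len)
{
	struct rte_dma_sge src = { .addr = src_iova, .length = len };
	struct rte_dma_sge dst = { .addr = dst_iova, .length = len };

	return rte_dma_copy_sg(dev_id, vchan, &src, &dst, 1, 1,
			RTE_DMA_OP_FLAG_SUBMIT);
}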
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
+	}
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	*job = *ppjob;
-	(*job)->status = status;
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
+		}
 
-	return (*job)->vq_id;
-}
+		fle_post_populate(fle, src, dst, length);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, length,
+			QBMAN_FLE_WORD4_FMT_SBF);
+	}
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
-	}
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return job[0]->vq_id;
+	return ret;
 }
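
For the single-copy path, the same enqueue/submit/complete flow is driven through the generic dmadev API; a minimal sketch assuming the device is already configured and started, and ignoring the driver-specific context-index flags:

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

static int copy_one(int16_t dev_id, uint16_t vchan,
	rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx = 0;
	bool has_error = false;
	int ret;

	ret = rte_dma_copy(dev_id, vchan, src, dst, len, 0);
	if (ret < 0)
		return ret;
	ret = rte_dma_submit(dev_id, vchan);
	if (ret)
		return ret;
	/* Busy-poll until the single job completes. */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
		;
	return has_error ? -EIO : 0;
}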
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -592,21 +870,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -615,7 +892,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -629,7 +906,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -662,563 +939,239 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
-
-		dq_storage++;
-		num_rx += num_rx_ret;
-	} while (pending);
-
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-	}
-	/* issue a volatile dequeue command for next pull */
-	while (1) {
-		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)");
-			continue;
-		}
-		break;
-	}
-
-	q_storage->active_dqs = dq_storage1;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &(dpdmai_dev->rx_queue[0]);
-
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
 		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
 		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
-{
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
+		dq_storage++;
+	} while (pending);
 
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+		while (!qbman_check_command_complete(
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			;
+		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	}
+	/* issue a volatile dequeue command for next pull */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
+			continue;
 		}
+		break;
 	}
 
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
+	q_storage->active_dqs = dq_storage1;
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	return ret;
-}
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	if (has_error)
+		*has_error = false;
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return num_rx;
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
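
With info_get now reporting RTE_DMA_CAPA_OPS_COPY_SG and the per-device vchan limits, an application can probe the capabilities before choosing the SG path; a small sketch using the standard dmadev query:

#include <errno.h>
#include <rte_dmadev.h>

/* Returns 0 when the device advertises scatter-gather copy support. */
static int check_sg_support(int16_t dev_id)
{
	struct rte_dma_info info;
	int ret;

	ret = rte_dma_info_get(dev_id, &info);
	if (ret)
		return ret;
	return (info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) ? 0 : -ENOTSUP;
}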
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
-	}
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
 
-	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
-	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
-		return -ENOMEM;
+		return -ENOTSUP;
 	}
-	qdma_dev->num_vqs = dev_conf->nb_vchans;
-
-	return 0;
-}
-
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
 
-	return 0;
-}
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
 
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
 
-	if (!devargs)
-		return 0;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+	}
 
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
+	/* Allocate Virtual Queues */
+	qdma_dev->vqs = rte_zmalloc(NULL,
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
+		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 	}
-	rte_kvargs_free(kvlist);
 
-	return 1;
-}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	return 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
 
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
+	return ret;
 }
 
 static int
@@ -1257,16 +1210,14 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1276,99 +1227,67 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
-
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_FLE_PRE_POPULATE))
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 0;
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_DESC_DEBUG))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to get SG cntx from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to get long cntx from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1377,11 +1296,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1390,46 +1315,71 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRIu64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
 
-	/* Reset and free virtual queues */
+	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
 	}
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1438,18 +1388,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1504,123 +1444,44 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the secondary process.",
+			dpdmai_dev->dpdmai_id);
+		return 0;
+	}
 
-	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	/* Close the device at the underlying layer */
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
 
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+		return ret;
 	}
 
-	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* Open DPDMAI device */
-	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
-		return ret;
-	}
-
-	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
-
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
-	}
-
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
-
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
 	if (!dpaa2_coherent_no_alloc_cache) {
 		if (dpaa2_svr_family == SVR_LX2160A) {
 			dpaa2_coherent_no_alloc_cache =
@@ -1635,24 +1496,76 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 		}
 	}
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the secondary process.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
+	/* Open DPDMAI device */
+	dpdmai_dev->dpdmai_id = dpdmai_id;
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		return ret;
+	}
 
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
+	/* Get DPDMAI attributes */
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
+		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
+
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1662,8 +1575,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1673,10 +1586,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
@@ -1718,5 +1631,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
-	"no_prefetch=<int> ");
+	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
+	DPAA2_QDMA_DESC_DEBUG"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,11 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +16,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
-#define DPAA2_DPDMAI_MAX_QUEUES	1
-
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,12 +166,49 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Token of this device */
@@ -221,42 +222,30 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -266,18 +255,18 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/** Used when silent mode is enabled */
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -286,8 +275,7 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -7,118 +7,30 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)(((idx) << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | ((len) & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)(idx_addr)) | (flag))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	(((idx) << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index 43e8b2d5c5..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_25 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 03/15] bus/fslmc: enhance the qbman dq storage logic
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08  7:22         ` [v4 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-10-08  7:22         ` [v4 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 04/15] dma/dpaa2: add short FD support Gagandeep Singh
                           ` (11 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used across multiple cores, so the single DQ
storage in the first union member is leaked once the multiple storages
are allocated. It no longer makes sense to keep the single DQ storage
in the union; remove it and reuse the first entry of the per-core DQ
storage array for that case (a short usage sketch follows).
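
A minimal usage sketch of the per-lcore storage (illustrative only, not
part of the patch; it assumes the dpaa2_queue layout and the
dpaa2_queue_storage_alloc()/dpaa2_queue_storage_free() helpers
introduced by this patch):

#include <rte_lcore.h>
#include "portal/dpaa2_hw_pvt.h" /* dpaa2_queue, storage alloc/free helpers */

static int example_rxq_setup(struct dpaa2_queue *rxq)
{
	/* allocate one DQ storage per possible lcore */
	return dpaa2_queue_storage_alloc(rxq, RTE_MAX_LCORE);
}

static struct qbman_result *example_rxq_poll(struct dpaa2_queue *rxq)
{
	/* each polling core dereferences only its own slot */
	struct queue_storage_info_t *q_storage =
		rxq->q_storage[rte_lcore_id()];

	return q_storage->dq_storage[0];
}

static void example_rxq_teardown(struct dpaa2_queue *rxq)
{
	/* release every per-lcore storage allocated above */
	dpaa2_queue_storage_free(rxq, RTE_MAX_LCORE);
}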

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 41 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 102 insertions(+), 140 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 7e858a113f..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..1ce481c88d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2cdf9308f8..62e381cd1a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1893,7 +1893,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -1984,10 +1984,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2038,18 +2035,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 883584a6e2..fb0408f8ad 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 6c77dc32c4..3a6aa69e8b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -862,7 +862,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1070,13 +1070,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1132,24 +1126,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1160,11 +1139,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1358,11 +1333,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 88374ea905..c411ad5a97 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 92e9dd40dc..376291af04 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index de8c024abb..34a3c4f6af 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 04/15] dma/dpaa2: add short FD support
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (2 preceding siblings ...)
  2024-10-08  7:22         ` [v4 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                           ` (10 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Short FD can be used for the single-transfer scenario and shows higher
performance than the FLE format.
1) Save the index context in the FD att field for short and FLE (non-SG)
   FDs.
2) Identify the FD type from the att field of the FD (see the
   illustrative encoding sketch below).
3) Force 48-bit addresses for the source address and the FLE, as
   required by the spec.
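
An illustrative sketch of the att-field pairing described in 1) and 2)
(the real helpers are dpaa2_qdma_fd_save_att()/dpaa2_qdma_fd_get_att()
in dpaa2_qdma.h; the exact bit split below is an assumption made only
for illustration):

#include <stdint.h>

enum example_fd_type { EX_FD_SHORT = 1, EX_FD_LONG = 2, EX_FD_SG = 3 };

#define EX_ATT_TYPE_SHIFT 13 /* assumed: upper bits carry the FD type */
#define EX_ATT_CNTX_MASK ((1u << EX_ATT_TYPE_SHIFT) - 1)

/* enqueue side: pack the FD type and the context index into att */
static inline uint16_t
example_att_encode(uint16_t cntx_idx, enum example_fd_type type)
{
	return (uint16_t)((type << EX_ATT_TYPE_SHIFT) |
		(cntx_idx & EX_ATT_CNTX_MASK));
}

/* dequeue side: recover the FD type to pick the completion path */
static inline enum example_fd_type
example_att_type(uint16_t att)
{
	return (enum example_fd_type)(att >> EX_ATT_TYPE_SHIFT);
}

/* dequeue side: recover the context index of the completed job */
static inline uint16_t
example_att_cntx(uint16_t att)
{
	return (uint16_t)(att & EX_ATT_CNTX_MASK);
}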

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |   2 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 4 files changed, 286 insertions(+), 112 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index 079337e61c..a358434aca 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -81,3 +81,5 @@ Device Arguments
    usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
 2. Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
    usage example: ``fslmc:dpdmai.1,desc_debug=1``
+3. Use dev arg option ``short_fd=1`` to enable short FDs.
+   usage example: ``fslmc:dpdmai.1,short_fd=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 3a6aa69e8b..23ecf4c5ac 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,6 +16,7 @@
 
 #define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
 #define DPAA2_QDMA_DESC_DEBUG "desc_debug"
+#define DPAA2_QDMA_USING_SHORT_FD "short_fd"
 
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
@@ -560,7 +561,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -583,11 +583,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -598,11 +595,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -620,6 +612,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -682,7 +676,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -718,6 +712,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -734,74 +729,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
-
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -809,15 +908,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -837,10 +1010,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -939,25 +1108,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -982,8 +1134,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1212,11 +1366,14 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_USING_SHORT_FD))
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 0;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1236,7 +1393,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
@@ -1603,5 +1760,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
 	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
-	DPAA2_QDMA_DESC_DEBUG"=<int>");
+	DPAA2_QDMA_DESC_DEBUG"=<int>"
+	DPAA2_QDMA_USING_SHORT_FD"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 05/15] dma/dpaa2: limit the max descriptor number
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (3 preceding siblings ...)
  2024-10-08  7:22         ` [v4 04/15] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                           ` (9 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD using a field that is
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits wide.

The maximum descriptor number of the ring must be a power of 2, so the
effective maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
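
As a quick sanity check of this arithmetic (illustration only, not part of
the patch; the macro values mirror the header change below):

#include <assert.h>

#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
#define DPAA2_QDMA_FD_ATT_MAX_IDX \
	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)

int main(void)
{
	/* 13 index bits give 8192 values; the ring uses half of them. */
	assert(DPAA2_QDMA_MAX_DESC == 4096);
	/* The limit stays a power of 2, as required by the ring mask. */
	assert((DPAA2_QDMA_MAX_DESC & (DPAA2_QDMA_MAX_DESC - 1)) == 0);
	return 0;
}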

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 06/15] dma/dpaa2: change the DMA copy return value
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (4 preceding siblings ...)
  2024-10-08  7:22         ` [v4 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                           ` (8 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

On success, the return value of DMA copy/SG copy should be the index of
the descriptor used for the copy.
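
A minimal caller-side sketch (illustration only, not part of the patch;
dev_id, vchan and the IOVAs are assumed to belong to an already
configured dpaa2_qdma device):

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

static int
submit_and_wait(int16_t dev_id, uint16_t vchan,
	rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx;
	bool error = false;
	int idx;

	idx = rte_dma_copy(dev_id, vchan, src, dst, len,
			RTE_DMA_OP_FLAG_SUBMIT);
	if (idx < 0)
		return idx; /* enqueue failed */

	/* With this change, idx is the descriptor index of this copy. */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &error) == 0)
		;

	return error ? -EIO : 0;
}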

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 23ecf4c5ac..180ffb3468 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -644,6 +644,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -720,10 +725,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 07/15] dma/dpaa2: move the qdma header to common place
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (5 preceding siblings ...)
  2024-10-08  7:22         ` [v4 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:22         ` [v4 08/15] dma/dpaa: refactor driver Gagandeep Singh
                           ` (7 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change code accordingly.
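
Hypothetical application-side adaptation (illustration only, not part of
the patch): for users of the submit helpers the visible change is the
header name and the RTE_DPAA2_QDMA_ -> RTE_DPAAX_QDMA_ prefix.

#include <rte_pmd_dpaax_qdma.h>

static inline uint64_t
copy_flags_with_index(uint16_t job_idx, uint64_t flags)
{
	/* Was RTE_DPAA2_QDMA_COPY_SUBMIT() before this patch. */
	return RTE_DPAAX_QDMA_COPY_SUBMIT(job_idx, flags);
}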

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9f0300126..5a4411eb4a 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 180ffb3468..c36cf6cbe6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -251,16 +251,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -274,23 +274,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -389,21 +387,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -483,17 +479,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -599,7 +594,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -649,9 +644,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -670,11 +665,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -706,8 +697,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1050,7 +1040,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1085,7 +1075,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1117,7 +1107,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1170,11 +1160,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1355,6 +1345,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	uint32_t pool_size;
 	char pool_name[64];
 	int ret;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1390,6 +1381,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 08/15] dma/dpaa: refactor driver
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (6 preceding siblings ...)
  2024-10-08  7:22         ` [v4 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-10-08  7:22         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
                           ` (6 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:22 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch refactors the DPAA DMA driver code with the following changes:
 - rename the HW descriptors and document them in more detail.
 - update the qdma engine and queue structures.
 - use rte_ring APIs for enqueue and dequeue (see the sketch below).
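
A minimal sketch of the SP/SC ring pattern this refactor adopts for
completion bookkeeping (illustration only; the names and parameters
below are placeholders, not driver code):

#include <rte_ring.h>

static struct rte_ring *
make_completion_ring(const char *name, unsigned int count)
{
	/* Single producer / single consumer, same flags as the driver;
	 * count must be a power of 2.
	 */
	return rte_ring_create(name, count, SOCKET_ID_ANY,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

static int
track_completion(struct rte_ring *r, void *desc)
{
	return rte_ring_enqueue(r, desc); /* 0 on success */
}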

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 1330 ++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |  222 +++---
 2 files changed, 864 insertions(+), 688 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 3d4fd818f8..a10a867580 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -8,69 +8,71 @@
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static uint32_t s_sg_max_entry_sz = 2000;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+static inline void
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	return ccdf->cfg8b_w1 & 0xff;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
+	uint8_t *queue_idx)
 {
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
 
-static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
+	return false;
 }
 
 static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+ilog2(int x)
 {
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
+	int log = 0;
 
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
-{
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
+	x >>= 1;
+
+	while (x) {
+		log++;
+		x >>= 1;
+	}
+	return log;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qsize(uint32_t q_size)
 {
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
+	return (ilog2(q_size) - ilog2(64));
 }
 
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qthld(uint32_t q_thld)
 {
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+	return (ilog2(q_thld) - ilog2(16));
 }
 
 static inline int
-ilog2(int x)
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
 {
-	int log = 0;
-
-	x >>= 1;
+	struct rte_dma_stats *stats = &fsl_queue->stats;
 
-	while (x) {
-		log++;
-		x >>= 1;
-	}
-	return log;
+	return (stats->submitted - stats->completed);
 }
 
 static u32
@@ -97,12 +99,12 @@ qdma_writel_be(u32 val, void *addr)
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -111,268 +113,221 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_comp_cmd_desc *ccdf;
+	uint16_t i, j;
+	struct fsl_qdma_cmpd_ft *ft;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0;
+
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
+			goto fail;
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
-
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
+
+		ft = queue->ft[i];
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
-
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
+
+		ccdf = &queue->cq[i];
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
+
+		ccdf->queue = queue->queue_id;
 	}
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
+	for (j = 0; j < i; j++)
+		rte_free(queue->ft[j]);
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static int
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *cmd_queue;
+	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
+
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
+
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
+		return -ENOMEM;
 	}
 
-	return NULL;
-}
-
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
-		return NULL;
+	cmd_queue->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
 
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
-	}
-	return queue_head;
+	return 0;
+}
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
-	}
-	rte_free(queue_head);
+static void
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->ft);
+	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
+}
 
-	return NULL;
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+static int
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	struct fsl_qdma_status_queue *status;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.");
-		return NULL;
-	}
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
-
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	status->cq = dma_pool_alloc(NULL, status_size,
+		status_size, &status->bus_addr);
+
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
-	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
+	return 0;
 }
 
 static int
@@ -420,59 +375,41 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
-				 enum rte_dma_status_code *status)
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
-	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
-	int count = 0;
-
-	while (count < nb_cpls) {
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
-
-		status_addr = fsl_status->virt_head;
-
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
-		count++;
-
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!",
+					err_msg);
+				return;
+			}
+		}
 	}
-	return count;
 }
 
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
 	u32 i, j;
@@ -489,8 +426,8 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -531,18 +468,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -551,7 +485,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -563,158 +497,389 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
 {
-	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
 
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				rte_panic("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	uint16_t num)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
-	u32 reg;
+	uint16_t i, idx, start, dq;
+	int ret, dq_cnt;
+
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
-		return -1;
-
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
-	fsl_queue->virt_head++;
-
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-		fsl_queue->virt_head = fsl_queue->cq;
-
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->stats.submitted++;
-	} else {
-		fsl_queue->pending++;
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			rte_panic("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint16_t blk_drain, check_num, drain_num;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
 	}
-	return fsl_comp->index;
+	goto overflow_check;
+
+	return 0;
 }
 
 static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
 
-	if (fsl_queue->count++)
-		goto finally;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
+	ft = fsl_queue->ft[fsl_queue->ci];
 
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor");
-		goto exit;
-	}
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-finally:
-	return fsl_qdma->desc_allocated++;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
 
-exit:
-	return -ENOMEM;
+	return 0;
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len;
+	uint16_t start, idx, num, i, next_idx;
+	int ret;
 
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
 
-	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
+	}
+
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 {
-	u32 i, start, end;
+	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
 
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+static int
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
+	return 0;
+}
+
+static int
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
+{
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
 		}
 	}
 
-	return -1;
-}
+queue_found:
+	if (!found)
+		return -ENXIO;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	if (fsl_queue->used)
+		return 0;
+
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
+	if (ret)
+		return ret;
+
+	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
+
+	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -745,148 +910,112 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	u32 reg;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	void *fsl_comp = NULL;
-
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL");
-		return -1;
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
+		return -ENOSPC;
 	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
 
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
+
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
-	fsl_queue->stats.completed += intr;
 
-	return intr;
+	return dq_num;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
-	}
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
-	fsl_queue->stats.completed += intr;
+	*has_error = false;
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
 
-	return intr;
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	return dq_num;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -903,17 +1032,15 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
@@ -926,90 +1053,80 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	uint32_t i, j, k;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d", phys_addr, regs_size);
-		goto err;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
-	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
+	k = 0;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
+				goto mem_free;
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
+			k++;
+		}
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
-	rte_free(fsl_qdma->status);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
+
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	return -1;
+	munmap(fsl_qdma->ctrl_base, regs_size);
+
+	return ret;
 }
 
 static int
@@ -1052,17 +1169,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, j, regs_size;
+
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
 	}
 
-	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..75c014f32f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,14 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,15 +61,16 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -75,23 +83,14 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
 #define QDMA_STATUS_SIZE		64
@@ -101,6 +100,7 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -118,89 +118,145 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
-	union {
-		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
-		};
-		__le64 data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
+#define FSL_QDMA_SG_MAX_ENTRY 64
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+	uint64_t phy_df;
+} __rte_packed;
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
-	struct fsl_qdma_format	*cq;
-	void			*block_base;
-	struct rte_dma_stats	stats;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	uint8_t channel_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
+	struct rte_dma_stats stats;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint16_t pending_num;
+	uint16_t complete_start;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
-struct fsl_qdma_comp {
-	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	int			index;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_chans;
-	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
-	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
+	uint32_t num_blocks;
+	int block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 09/15] dma/dpaa: support burst capacity API
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (7 preceding siblings ...)
  2024-10-08  7:22         ` [v4 08/15] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 10/15] dma/dpaa: add silent mode support Gagandeep Singh
                           ` (5 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch improves the dpaa qdma driver by adding the
dpaa_qdma_burst_capacity API, which returns the remaining space
in the pending descriptor ring of a virtual channel.
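
A minimal usage sketch from the application side (dev_id, vchan and
the address arrays are assumptions, not part of this patch): the new
callback is reached through the generic rte_dma_burst_capacity() API
and reports pending_max - pending_num for the selected queue.

#include <rte_common.h>
#include <rte_dmadev.h>

/* Enqueue at most as many copies as the PMD reports room for. */
static uint16_t
enqueue_copies(int16_t dev_id, uint16_t vchan, const rte_iova_t *src,
	       const rte_iova_t *dst, uint32_t len, uint16_t nb)
{
	uint16_t room = rte_dma_burst_capacity(dev_id, vchan);
	uint16_t i, n = RTE_MIN(nb, room);

	for (i = 0; i < n; i++) {
		if (rte_dma_copy(dev_id, vchan, src[i], dst[i], len, 0) < 0)
			break;
	}
	if (i > 0)
		rte_dma_submit(dev_id, vchan);

	return i;
}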

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a10a867580..94be9c5fd1 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1039,6 +1039,15 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	return fsl_queue->pending_max - fsl_queue->pending_num;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1152,6 +1161,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 10/15] dma/dpaa: add silent mode support
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (8 preceding siblings ...)
  2024-10-08  7:23         ` [v4 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                           ` (4 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add silent mode support: when the application configures the device
with enable_silent set in struct rte_dma_conf, the driver skips
per-job completion bookkeeping and the dequeue operations report
nothing back to the application.
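
A minimal configuration sketch (dev_id is an assumption and vchan
setup is elided): silent mode is requested through the generic
device configuration.

#include <stdbool.h>
#include <rte_dmadev.h>

static int
configure_silent(int16_t dev_id)
{
	struct rte_dma_conf conf = {
		.nb_vchans = 1,
		.enable_silent = true, /* no completion status reported */
	};

	return rte_dma_configure(dev_id, &conf);
}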

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 46 ++++++++++++++++++++++++++++++++----
 drivers/dma/dpaa/dpaa_qdma.h |  1 +
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 94be9c5fd1..041446b5bc 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -119,6 +119,7 @@ dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_comp_cmd_desc *ccdf;
@@ -173,7 +174,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
-
+		if (!fsl_qdma->is_silent)
+			ccdf->ser = 1;
 		ccdf->queue = queue->queue_id;
 	}
 	queue->ci = 0;
@@ -575,9 +577,12 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	uint16_t num)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
@@ -622,17 +627,34 @@ static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
 	int overflow = 0;
+	uint32_t reg;
 	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
-	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+	if (fsl_qdma->is_silent) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
@@ -877,10 +899,13 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -966,6 +991,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in slient mode");
+
+		return 0;
+	}
+
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
 			fsl_queue->block_id);
 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
@@ -995,6 +1026,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in slient mode");
+
+		return 0;
+	}
 
 	*has_error = false;
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 75c014f32f..9b69db517e 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -257,6 +257,7 @@ struct fsl_qdma_engine {
 	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 11/15] dma/dpaa: add workaround for ERR050757
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (9 preceding siblings ...)
  2024-10-08  7:23         ` [v4 10/15] dma/dpaa: add silent mode support Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                           ` (3 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes, so the completion buffer inside
the controller needs to be at least 16 * 256 B = 4 KB, but the PCIe
controller only has 3 KB of buffer. If more than 3 KB of outbound
read transactions are pending, the PCIe controller may drop the
incoming completions without notifying the initiator of the
transaction, leaving transactions unfinished, and all subsequent
outbound reads to PCIe are blocked permanently.
To avoid a qDMA hang while it keeps waiting for data that was
silently dropped, set stride mode for qDMA so that each source read
is capped at 128 bytes.
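
For readability, the workaround applied in both the single and the
scatter-gather enqueue paths below boils down to the following
(sdf and len as used in the driver; the field meanings follow the
sdf layout introduced earlier in this series):

	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
		sdf->ssen = 1; /* enable source stride mode */
		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN; /* stride size */
		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN; /* stride distance */
	} else {
		sdf->ssen = 0;
		sdf->sss = 0;
		sdf->ssd = 0;
	}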

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 38 +++++++++++++++++++++++++++++++++---
 drivers/dma/dpaa/dpaa_qdma.h | 19 +++++++-----------
 4 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 041446b5bc..dbc53b784f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,7 +167,6 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -449,8 +448,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 		}
 
@@ -694,6 +694,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
 	if (unlikely(ret))
@@ -701,6 +704,19 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -733,6 +749,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	uint32_t total_len;
 	uint16_t start, idx, num, i, next_idx;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 eq_sg:
 	total_len = 0;
@@ -798,6 +817,19 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 9b69db517e..171c093117 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -77,8 +77,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -88,18 +86,15 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
-#define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
 #define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 12/15] dma/dpaa: qdma stall workaround for ERR050265
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (10 preceding siblings ...)
  2024-10-08  7:23         ` [v4 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
                           ` (2 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers, in the
form of fewer transactions, and should be set whenever possible.
This patch enables prefetchable reads in the source descriptor and
thereby also fixes a QDMA stall issue caused by unaligned
transactions.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 9 +++++++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index dbc53b784f..6d8e9c8183 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,6 +167,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -707,6 +710,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
@@ -820,6 +826,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 13/15] dma/dpaa: add Scatter Gather support
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (11 preceding siblings ...)
  2024-10-08  7:23         ` [v4 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
  2024-10-08  7:23         ` [v4 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add the copy_sg operation to support scatter-gather copies with
multiple source and destination segments per job.
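
A usage sketch from the application side (dev_id, vchan and the
IOVAs are assumptions): per-entry source and destination lengths
must match, and the index-array-in-flags encoding follows the
DPAA_QDMA_IDXADDR_FROM_SG_FLAG() helper added by this patch, which
expects a 256-byte aligned array of 16-bit job indexes in the upper
bits of the flags word.

#include <rte_common.h>
#include <rte_dmadev.h>

static int
sg_copy_two(int16_t dev_id, uint16_t vchan,
	    rte_iova_t s0, rte_iova_t s1,
	    rte_iova_t d0, rte_iova_t d1, uint32_t len)
{
	static uint16_t job_idx[2] __rte_aligned(256) = {0, 1};
	struct rte_dma_sge src[2] = {
		{ .addr = s0, .length = len },
		{ .addr = s1, .length = len },
	};
	struct rte_dma_sge dst[2] = {
		{ .addr = d0, .length = len },
		{ .addr = d1, .length = len },
	};
	uint64_t flags = (uint64_t)(uintptr_t)job_idx |
		RTE_DMA_OP_FLAG_SUBMIT;

	return rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2, flags);
}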

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 55 ++++++++++++++++++++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h | 10 ++++++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 6d8e9c8183..0aa3575fe9 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1021,6 +1021,60 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
@@ -1235,6 +1289,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 171c093117..1e820d0207 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -24,8 +24,13 @@
 #define QDMA_STATUS_REGION_OFFSET \
 	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
 #define QDMA_STATUS_REGION_SIZE 0x10000
-#define DPAA_QDMA_COPY_IDX_OFFSET 8
+
 #define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(DPAA_QDMA_COPY_IDX_OFFSET)
+#define DPAA_QDMA_SG_IDX_ADDR_MASK \
+	(DPAA_QDMA_SG_IDX_ADDR_ALIGN - 1)
 
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
@@ -194,6 +199,9 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
+
 #define DPAA_QDMA_IDX_FROM_FLAG(flag) \
 	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 14/15] dma/dpaa: add DMA error checks
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (12 preceding siblings ...)
  2024-10-08  7:23         ` [v4 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  2024-10-08  7:23         ` [v4 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Sachin Saxena, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add user-configurable DMA error checks: when the device is probed
with the dpaa_dma_err_check=1 dev arg, the driver scans the qDMA
error status registers on each dequeue and accounts detected errors
in the virtual channel statistics.
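
A minimal sketch of how an application observes the result (dev_id
and vchan are assumptions): errors found by the register scan show
up in the standard vchan statistics.

#include <stdio.h>
#include <inttypes.h>
#include <rte_dmadev.h>

static void
report_dma_errors(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_stats st;

	if (rte_dma_stats_get(dev_id, vchan, &st) == 0 && st.errors != 0)
		printf("vchan %u: %" PRIu64 " DMA errors\n",
		       (unsigned int)vchan, st.errors);
}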

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/dmadevs/dpaa.rst      |   6 ++
 drivers/dma/dpaa/dpaa_qdma.c     | 135 ++++++++++++++++++++++++++++++-
 drivers/dma/dpaa/dpaa_qdma.h     |  42 ++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.c |   2 +-
 4 files changed, 183 insertions(+), 2 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 8a7c0befc3..a60457229a 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -69,3 +69,9 @@ Platform Requirement
 
 DPAA DMA driver for DPDK can only work on NXP SoCs
 as listed in the `Supported DPAA SoCs`_.
+
+Device Arguments
+----------------
+
+Use the dev arg option ``dpaa_dma_err_check=1`` to check DMA errors at
+the driver level. Usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1``
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 0aa3575fe9..3fcd9b8904 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,11 +4,15 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
 static uint32_t s_sg_max_entry_sz = 2000;
+static bool s_hw_err_check;
+
+#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -638,7 +642,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (fsl_qdma->is_silent) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -1076,13 +1080,81 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1107,6 +1179,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
+			fsl_queue->stats.errors++;
+	}
 
 	return dq_num;
 }
@@ -1117,7 +1195,9 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1138,6 +1218,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	for (i = 0; i < dq_num; i++)
 		last_idx[i] = desc_complete[i]->flag;
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
+			fsl_queue->stats.errors++;
+		}
+	}
+
 	return dq_num;
 }
 
@@ -1189,6 +1279,43 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.stats_reset		  = dpaa_qdma_stats_reset,
 };
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
@@ -1199,6 +1326,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
+		s_hw_err_check = true;
+		DPAA_QDMA_INFO("Enable DMA error checks");
+	}
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -1340,4 +1472,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 1e820d0207..91eaf1455a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -199,6 +199,48 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
 #define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c411ad5a97..814aaf0996 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2682,7 +2682,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
 		dpaa2_enable_err_queue = 1;
-		DPAA2_PMD_INFO("Enable error queue");
+		DPAA2_PMD_INFO("Enable DMA error checks");
 	}
 
 	/* Allocate memory for hardware structure for queues */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v4 15/15] bus/dpaa: add port bmi stats
  2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                           ` (13 preceding siblings ...)
  2024-10-08  7:23         ` [v4 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
@ 2024-10-08  7:23         ` Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08  7:23 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended
statistics.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)
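
The new BMI counters are reported through the standard ethdev xstats API,
so no new application-facing call is needed. A minimal usage sketch follows
(illustrative only, not part of the patch); the port id and the
allocation/printing around the calls are placeholders, assuming a
configured and started dpaa port:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Dump all xstats of a port, including the BMI counters added here
 * ("rx_frame_counter", "rx_bad_frames_count", ...). Assumes port_id
 * refers to a started dpaa interface.
 */
static void
dump_bmi_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int n, i;

	/* First call with NULL to query the number of xstats. */
	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return;

	xstats = calloc(n, sizeof(*xstats));
	names = calloc(n, sizeof(*names));
	if (xstats == NULL || names == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, xstats, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);

out:
	free(xstats);
	free(names);
}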

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index f918836ec2..1f120b7614 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 51f5422e0c..da4a64d99a 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discard_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_deallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values_copy);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 0006bd33d4..1278623e7b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 00/15] NXP DMA driver fixes and Enhancements
  2024-10-08  7:22         ` [v4 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-08 10:57           ` Gagandeep Singh
  2024-10-08 10:57             ` [v5 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                               ` (14 more replies)
  0 siblings, 15 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:57 UTC (permalink / raw)
  To: dev

V5 changes:
* fix typos and documentation compilation issues

V4 changes:
* rebase the series on the latest commit and reduce the number of patches

V3 changes:
* fix 32-bit compilation issue

V2 changes:
* fix compilation issue on Ubuntu 22.04

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (14):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: refactor driver code
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: support burst capacity API
  dma/dpaa: add silent mode support
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: add Scatter Gather support
  dma/dpaa: add DMA error checks

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    9 +
 doc/guides/dmadevs/dpaa2.rst                  |   10 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1593 +++++++----
 drivers/dma/dpaa/dpaa_qdma.h                  |  292 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2446 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   83 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 29 files changed, 2899 insertions(+), 2298 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 01/15] dma/dpaa2: configure route by port by PCIe port param
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-10-08 10:57             ` Gagandeep Singh
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08 10:57             ` [v5 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
                               ` (13 subsequent siblings)
  14 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:57 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)
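
With this change, route-by-port is configured through the standard
rte_dma_vchan_conf port parameters instead of the removed
rte_dpaa2_qdma_vchan_rbp_enable() API. A minimal sketch follows
(illustrative only, not part of the patch) of setting up a memory-to-PCIe
vchan; dev_id and the coreid/pfid/vfid values are placeholders:

#include <rte_dmadev.h>

/* Sketch: vchan 0 does MEM -> PCIe copies; the generic PCIe port
 * parameters below are mapped by the driver onto the rbp fields as
 * listed in the commit message (coreid -> dportid, pfid -> dpfid,
 * vfen -> dvfa, vfid -> dvfid). The ids used here are placeholders.
 */
static int
setup_mem_to_pcie_vchan(int16_t dev_id)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 256,
	};

	conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
	conf.dst_port.pcie.coreid = 1;
	conf.dst_port.pcie.pfid = 0;
	conf.dst_port.pcie.vfen = 1;
	conf.dst_port.pcie.vfid = 2;

	return rte_dma_vchan_setup(dev_id, 0, &conf);
}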

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5780e49297..5d4749eae3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.dvfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index fc16517f7a..43e8b2d5c5 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 02/15] dma/dpaa2: refactor driver code
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08 10:57             ` [v5 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-08 10:57             ` Gagandeep Singh
  2024-10-08 10:57             ` [v5 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                               ` (12 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:57 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Refactor the driver code with the following changes:
- multiple HW queues
- SMA single copy and SG copy
- silent mode

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |    8 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 2208 ++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  148 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  130 +-
 drivers/dma/dpaa2/version.map          |   13 -
 5 files changed, 1158 insertions(+), 1349 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map
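
A minimal sketch follows (illustrative only, not part of the patch) of
exercising the reworked SG copy path through the generic dmadev API,
assuming the device and vchan are already configured in non-silent mode;
dev_id, vchan and the IOVAs are placeholders:

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

/* Sketch: submit a 2-entry scatter-gather copy and poll for completion.
 * Source and destination entry lengths must match, as the reworked SG
 * path rejects mismatched lengths with -ENOTSUP.
 */
static int
sg_copy_example(int16_t dev_id, uint16_t vchan,
		rte_iova_t src_base, rte_iova_t dst_base)
{
	struct rte_dma_sge src[2] = {
		{ .addr = src_base, .length = 4096 },
		{ .addr = src_base + 4096, .length = 4096 },
	};
	struct rte_dma_sge dst[2] = {
		{ .addr = dst_base, .length = 4096 },
		{ .addr = dst_base + 4096, .length = 4096 },
	};
	uint16_t last_idx;
	bool error = false;
	int ret;

	ret = rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2,
			      RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;

	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &error) == 0)
		;

	return error ? -EIO : 0;
}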

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index d2c26231e2..eeeb5d52a8 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -73,3 +73,11 @@ Platform Requirement
 
 DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
 ``Supported DPAA2 SoCs``.
+
+Device Arguments
+----------------
+* Use dev arg option ``fle_pre_populate=1`` to pre-populate all
+  DMA descriptors with pre-initialized values.
+  usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
+* Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
+  usage example: ``fslmc:dpdmai.1,desc_debug=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5d4749eae3..6c77dc32c4 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -14,220 +14,378 @@
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
-#define DPAA2_QDMA_PREFETCH "prefetch"
+#define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
+#define DPAA2_QDMA_DESC_DEBUG "desc_debug"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
-static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+static struct fsl_mc_io s_proc_mc_reg;
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (strcmp(value, "1"))
+		return -1;
 
-	fd->simple_pci.len_sl = len;
+	return 0;
+}
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+static int
+dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	if (!devargs)
+		return 0;
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
 
-	return 0;
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
 }
 
 static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if ((ring->tail + nb) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem, nb * sizeof(uint16_t));
+		ring->tail += nb;
+	} else {
+		rte_memcpy(&ring->cntx_idx_ring[ring->tail],
+			elem,
+			(DPAA2_QDMA_MAX_DESC - ring->tail) *
+			sizeof(uint16_t));
+		rte_memcpy(&ring->cntx_idx_ring[0],
+			&elem[DPAA2_QDMA_MAX_DESC - ring->tail],
+			(nb - DPAA2_QDMA_MAX_DESC + ring->tail) *
+			sizeof(uint16_t));
+		ring->tail = (ring->tail + nb) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
 
-	return 0;
+	if (free_space)
+		*free_space = ring->free_space;
+
+	return nb;
 }
 
-static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
 
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+	if (!ret)
+		return 0;
+
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
+
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
+}
+
+static void
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
+{
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
+{
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
 {
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i;
+	uint32_t total_len = 0;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = src[i].length;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
+
+	total_len += dst[i].length;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
+}
+
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
+{
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
 {
 	uint16_t i;
 	uint32_t total_len = 0;
-	uint64_t iova;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +393,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +403,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += src[i].length;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,325 +416,452 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
+
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
+	}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
 	}
 
-	return 0;
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected, i;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
+		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
+	}
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	*job = *ppjob;
-	(*job)->status = status;
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
+		}
 
-	return (*job)->vq_id;
-}
+		fle_post_populate(fle, src, dst, length);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, length,
+			QBMAN_FLE_WORD4_FMT_SBF);
+	}
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
-	}
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return job[0]->vq_id;
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -592,21 +870,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -615,7 +892,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -629,7 +906,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -662,563 +939,239 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
-
-		dq_storage++;
-		num_rx += num_rx_ret;
-	} while (pending);
-
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-	}
-	/* issue a volatile dequeue command for next pull */
-	while (1) {
-		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)");
-			continue;
-		}
-		break;
-	}
-
-	q_storage->active_dqs = dq_storage1;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &(dpdmai_dev->rx_queue[0]);
-
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
 		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
 		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
-{
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
+		dq_storage++;
+	} while (pending);
 
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+		while (!qbman_check_command_complete(
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			;
+		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	}
+	/* issue a volatile dequeue command for next pull */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
+			continue;
 		}
+		break;
 	}
 
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
+	q_storage->active_dqs = dq_storage1;
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	return ret;
-}
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	if (has_error)
+		*has_error = false;
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return num_rx;
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
-	}
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
 
-	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
-	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
-		return -ENOMEM;
+		return -ENOTSUP;
 	}
-	qdma_dev->num_vqs = dev_conf->nb_vchans;
-
-	return 0;
-}
-
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
 
-	return 0;
-}
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
 
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
 
-	if (!devargs)
-		return 0;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+	}
 
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
+	/* Allocate Virtual Queues */
+	qdma_dev->vqs = rte_zmalloc(NULL,
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
+		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 	}
-	rte_kvargs_free(kvlist);
 
-	return 1;
-}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	return 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
 
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
+	return ret;
 }
 
 static int
@@ -1257,16 +1210,14 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1276,99 +1227,67 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
-
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_FLE_PRE_POPULATE))
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 0;
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_DESC_DEBUG))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1377,11 +1296,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1390,46 +1315,71 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
 
-	/* Reset and free virtual queues */
+	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
 	}
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1438,18 +1388,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1504,123 +1444,44 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.",
+			dpdmai_dev->dpdmai_id);
+		return 0;
+	}
 
-	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	/* Close the device at underlying layer*/
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
 
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+		return ret;
 	}
 
-	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* Open DPDMAI device */
-	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
-		return ret;
-	}
-
-	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
-
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
-	}
-
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
-
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
 	if (!dpaa2_coherent_no_alloc_cache) {
 		if (dpaa2_svr_family == SVR_LX2160A) {
 			dpaa2_coherent_no_alloc_cache =
@@ -1635,24 +1496,76 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 		}
 	}
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
+	/* Open DPDMAI device */
+	dpdmai_dev->dpdmai_id = dpdmai_id;
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		return ret;
+	}
 
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
+	/* Get DPDMAI attributes */
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
+		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
+
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1662,8 +1575,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1673,10 +1586,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
@@ -1718,5 +1631,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
-	"no_prefetch=<int> ");
+	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
+	DPAA2_QDMA_DESC_DEBUG"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,11 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +16,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
-#define DPAA2_DPDMAI_MAX_QUEUES	1
-
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,12 +166,49 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -221,42 +222,30 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -266,18 +255,18 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/**Used for silent enabled*/
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -286,8 +275,7 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -7,118 +7,30 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)(((idx) << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | ((len) & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index 43e8b2d5c5..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_25 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 03/15] bus/fslmc: enhance the qbman dq storage logic
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-08 10:57             ` [v5 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-10-08 10:57             ` [v5 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
@ 2024-10-08 10:57             ` Gagandeep Singh
  2024-10-08 10:57             ` [v5 04/15] dma/dpaa2: add short FD support Gagandeep Singh
                               ` (11 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:57 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used across multiple cores. The single DQ
storage of the first union member is leaked if multiple storages are
allocated. It does not make sense to keep the single DQ storage in the
union, so remove it and reuse the first entry of the multiple-storage
array for this case.
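
As a rough usage sketch (illustrative only, not part of the patch; the
helpers are the ones introduced in dpaa2_hw_pvt.h below, with error
handling trimmed), a queue now carries one storage slot per lcore:

static int
example_q_storage_usage(struct dpaa2_queue *rxq)
{
	struct qbman_result *dq_storage;
	int ret;

	/* Allocate one queue_storage_info_t per requested slot. */
	ret = dpaa2_queue_storage_alloc(rxq, RTE_MAX_LCORE);
	if (ret)
		return ret;

	/* Datapath: each core pulls into its own slot, nothing is shared. */
	dq_storage = rxq->q_storage[rte_lcore_id()]->dq_storage[0];
	(void)dq_storage;

	/* Teardown frees and NULLs every slot. */
	dpaa2_queue_storage_free(rxq, RTE_MAX_LCORE);
	return 0;
}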

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 41 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 102 insertions(+), 140 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 7e858a113f..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..1ce481c88d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -186,6 +188,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2cdf9308f8..62e381cd1a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1893,7 +1893,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -1984,10 +1984,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2038,18 +2035,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 883584a6e2..fb0408f8ad 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 6c77dc32c4..3a6aa69e8b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -862,7 +862,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1070,13 +1070,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1132,24 +1126,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1160,11 +1139,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1358,11 +1333,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 88374ea905..c411ad5a97 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 92e9dd40dc..376291af04 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index de8c024abb..34a3c4f6af 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 04/15] dma/dpaa2: add short FD support
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (2 preceding siblings ...)
  2024-10-08 10:57             ` [v5 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-10-08 10:57             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                               ` (10 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:57 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Short FD can be used for the single-transfer scenario and shows higher
performance than the FLE format.
1) Save the index context in the FD att field for both short and
   FLE (non-SG) formats.
2) Identify the FD type by the att field of the FD.
3) Force 48-bit addresses for the source address and FLE, as required
   by the spec.
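
As an illustrative sketch (fd and job_idx are placeholders; the helpers
and macros are the ones added to dpaa2_qdma.h below), the 15-bit
rsv1_att field carries the FD type in its upper bits and the job index
in its lower 13 bits:

/* Enqueue side: record the type and per-job index in the FD. */
dpaa2_qdma_fd_save_att(fd, job_idx, DPAA2_QDMA_FD_SHORT);

/* Dequeue side: recover both from the completed FD. */
uint16_t att = dpaa2_qdma_fd_get_att(fd);
enum dpaa2_qdma_fd_type type = DPAA2_QDMA_FD_ATT_TYPE(att); /* DPAA2_QDMA_FD_SHORT */
uint16_t idx = DPAA2_QDMA_FD_ATT_CNTX(att);                 /* job_idx */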

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |   2 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 4 files changed, 286 insertions(+), 112 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index eeeb5d52a8..6ebf7ac030 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -81,3 +81,5 @@ Device Arguments
   usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
 * Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
   usage example: ``fslmc:dpdmai.1,desc_debug=1``
+* Use dev arg option ``short_fd=1`` to enable short FDs.
+  usage example: ``fslmc:dpdmai.1,short_fd=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 3a6aa69e8b..23ecf4c5ac 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,6 +16,7 @@
 
 #define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
 #define DPAA2_QDMA_DESC_DEBUG "desc_debug"
+#define DPAA2_QDMA_USING_SHORT_FD "short_fd"
 
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
@@ -560,7 +561,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -583,11 +583,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -598,11 +595,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -620,6 +612,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -682,7 +676,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -718,6 +712,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -734,74 +729,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
-
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -809,15 +908,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -837,10 +1010,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -939,25 +1108,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -982,8 +1134,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1212,11 +1366,14 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_USING_SHORT_FD))
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 0;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1236,7 +1393,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
@@ -1603,5 +1760,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
 	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
-	DPAA2_QDMA_DESC_DEBUG"=<int>");
+	DPAA2_QDMA_DESC_DEBUG"=<int>"
+	DPAA2_QDMA_USING_SHORT_FD"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 05/15] dma/dpaa2: limit the max descriptor number
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (3 preceding siblings ...)
  2024-10-08 10:57             ` [v5 04/15] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                               ` (9 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD in a field that is
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits wide.

The maximum descriptor number of the ring must be a power of 2, so the
effective maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
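
With the values defined below, the limit works out as follows (a simple
check, assuming the 13-bit offset introduced by this series):

/* DPAA2_QDMA_FD_ATT_MAX_IDX = (1 << 13) - 1  = 8191
 * DPAA2_QDMA_MAX_DESC       = (8191 + 1) / 2 = 4096
 * i.e. a virtual queue holds at most 4096 in-flight descriptors.
 */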

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 06/15] dma/dpaa2: change the DMA copy return value
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (4 preceding siblings ...)
  2024-10-08 10:58             ` [v5 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                               ` (8 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

The return value of the DMA copy/SG copy operation should be the index
of the copied descriptor on success.
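
A caller-side sketch of how the index is consumed through the generic
dmadev API (dev_id, vchan, the IOVAs and len are placeholders):

int idx;
uint16_t last_idx, nb;
bool error;

idx = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
		RTE_DMA_OP_FLAG_SUBMIT);
if (idx >= 0) {
	/* idx is the running copy index (wrapping at UINT16_MAX); once
	 * the transfer finishes, last_idx from rte_dma_completed()
	 * reports the index of the last completed descriptor.
	 */
	nb = rte_dma_completed(dev_id, vchan, 1, &last_idx, &error);
}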

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 23ecf4c5ac..180ffb3468 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -644,6 +644,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -720,10 +725,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 07/15] dma/dpaa2: move the qdma header to common place
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (5 preceding siblings ...)
  2024-10-08 10:58             ` [v5 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 08/15] dma/dpaa: refactor driver Gagandeep Singh
                               ` (7 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change code accordingly.
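
For applications, only the header name and macro prefix change; a
hypothetical user of the indexed-copy flags would now do:

#include <rte_pmd_dpaax_qdma.h>

uint64_t flags = RTE_DPAAX_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);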

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9f0300126..5a4411eb4a 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 180ffb3468..c36cf6cbe6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -251,16 +251,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -274,23 +274,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -389,21 +387,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -483,17 +479,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -599,7 +594,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -649,9 +644,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -670,11 +665,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -706,8 +697,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1050,7 +1040,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1085,7 +1075,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1117,7 +1107,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1170,11 +1160,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1355,6 +1345,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	uint32_t pool_size;
 	char pool_name[64];
 	int ret;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1390,6 +1381,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 08/15] dma/dpaa: refactor driver
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (6 preceding siblings ...)
  2024-10-08 10:58             ` [v5 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
                               ` (6 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch refactors the DPAA DMA driver code with the following changes:
 - rename the HW descriptors and document their fields in detail.
 - update the qdma engine and queue structures.
 - use rte_ring APIs for the enqueue and dequeue bookkeeping (a minimal
   sketch of the pattern follows below).
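
A minimal sketch of the single-producer/single-consumer rte_ring pattern
the reworked enqueue/dequeue paths rely on. Names such as demo_ring and
demo_desc are hypothetical; the actual rings and descriptor layout are
the ones created in fsl_qdma_alloc_queue_resources() and defined in
dpaa_qdma.h.

#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_ring.h>

struct demo_desc {
	uint64_t src;
	uint64_t dst;
	uint32_t len;
};

int main(int argc, char **argv)
{
	struct rte_ring *r;
	struct demo_desc job = { .src = 0x1000, .dst = 0x2000, .len = 64 };
	struct demo_desc *done[8];
	unsigned int n;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* SP/SC ring; without RING_F_EXACT_SZ the count must be a power of two. */
	r = rte_ring_create("demo_ring", 128, rte_socket_id(),
			    RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	/* Submit side: remember the descriptor handed to hardware. */
	if (rte_ring_enqueue(r, &job) != 0)
		printf("ring full, drain completions first\n");

	/* Completion side: collect finished descriptors in a burst. */
	n = rte_ring_dequeue_burst(r, (void **)done, 8, NULL);
	printf("dequeued %u completed descriptors\n", n);

	rte_ring_free(r);
	return 0;
}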

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 1330 ++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |  222 +++---
 2 files changed, 864 insertions(+), 688 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 3d4fd818f8..a10a867580 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -8,69 +8,71 @@
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static uint32_t s_sg_max_entry_sz = 2000;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+static inline void
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	return ccdf->cfg8b_w1 & 0xff;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
+	uint8_t *queue_idx)
 {
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
 
-static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
+	return false;
 }
 
 static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+ilog2(int x)
 {
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
+	int log = 0;
 
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
-{
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
+	x >>= 1;
+
+	while (x) {
+		log++;
+		x >>= 1;
+	}
+	return log;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qsize(uint32_t q_size)
 {
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
+	return (ilog2(q_size) - ilog2(64));
 }
 
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qthld(uint32_t q_thld)
 {
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+	return (ilog2(q_thld) - ilog2(16));
 }
 
 static inline int
-ilog2(int x)
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
 {
-	int log = 0;
-
-	x >>= 1;
+	struct rte_dma_stats *stats = &fsl_queue->stats;
 
-	while (x) {
-		log++;
-		x >>= 1;
-	}
-	return log;
+	return (stats->submitted - stats->completed);
 }
 
 static u32
@@ -97,12 +99,12 @@ qdma_writel_be(u32 val, void *addr)
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -111,268 +113,221 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_comp_cmd_desc *ccdf;
+	uint16_t i, j;
+	struct fsl_qdma_cmpd_ft *ft;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0;
+
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
+			goto fail;
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
-
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
+
+		ft = queue->ft[i];
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
-
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
+
+		ccdf = &queue->cq[i];
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
+
+		ccdf->queue = queue->queue_id;
 	}
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
+	for (j = 0; j < i; j++)
+		rte_free(queue->ft[j]);
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static int
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *cmd_queue;
+	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
+
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
+
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
+		return -ENOMEM;
 	}
 
-	return NULL;
-}
-
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
-		return NULL;
+	cmd_queue->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
 
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
-	}
-	return queue_head;
+	return 0;
+}
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
-	}
-	rte_free(queue_head);
+static void
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->ft);
+	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
+}
 
-	return NULL;
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+static int
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	struct fsl_qdma_status_queue *status;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.");
-		return NULL;
-	}
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
-
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	status->cq = dma_pool_alloc(NULL, status_size,
+		status_size, &status->bus_addr);
+
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
-	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
+	return 0;
 }
 
 static int
@@ -420,59 +375,41 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
-				 enum rte_dma_status_code *status)
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
-	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
-	int count = 0;
-
-	while (count < nb_cpls) {
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
-
-		status_addr = fsl_status->virt_head;
-
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
-		count++;
-
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!",
+					err_msg);
+				return;
+			}
+		}
 	}
-	return count;
 }
 
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
 	u32 i, j;
@@ -489,8 +426,8 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -531,18 +468,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Disable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -551,7 +485,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -563,158 +497,389 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
 {
-	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
 
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				rte_panic("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	uint16_t num)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
-	u32 reg;
+	uint16_t i, idx, start, dq;
+	int ret, dq_cnt;
+
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
-		return -1;
-
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
-	fsl_queue->virt_head++;
-
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-		fsl_queue->virt_head = fsl_queue->cq;
-
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->stats.submitted++;
-	} else {
-		fsl_queue->pending++;
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			rte_panic("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint16_t blk_drain, check_num, drain_num;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
 	}
-	return fsl_comp->index;
+	goto overflow_check;
+
+	return 0;
 }
 
 static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
 
-	if (fsl_queue->count++)
-		goto finally;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
+	ft = fsl_queue->ft[fsl_queue->ci];
 
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor");
-		goto exit;
-	}
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-finally:
-	return fsl_qdma->desc_allocated++;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
 
-exit:
-	return -ENOMEM;
+	return 0;
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len;
+	uint16_t start, idx, num, i, next_idx;
+	int ret;
 
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
 
-	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
+	}
+
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 {
-	u32 i, start, end;
+	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
 
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+static int
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
+	return 0;
+}
+
+static int
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
+{
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
 		}
 	}
 
-	return -1;
-}
+queue_found:
+	if (!found)
+		return -ENXIO;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	if (fsl_queue->used)
+		return 0;
+
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
+	if (ret)
+		return ret;
+
+	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
+
+	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -745,148 +910,112 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	u32 reg;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	void *fsl_comp = NULL;
-
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL");
-		return -1;
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
+		return -ENOSPC;
 	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
 
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
+
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
-	fsl_queue->stats.completed += intr;
 
-	return intr;
+	return dq_num;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
-	}
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
-	fsl_queue->stats.completed += intr;
+	*has_error = false;
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
 
-	return intr;
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	return dq_num;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -903,17 +1032,15 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
@@ -926,90 +1053,80 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	uint32_t i, j, k;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d", phys_addr, regs_size);
-		goto err;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
-	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
+	k = 0;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
+				goto mem_free;
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
+			k++;
+		}
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
-	rte_free(fsl_qdma->status);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
+
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	return -1;
+	munmap(fsl_qdma->ctrl_base, regs_size);
+
+	return ret;
 }
 
 static int
@@ -1052,17 +1169,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, j, regs_size;
+
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
 	}
 
-	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..75c014f32f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,14 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,15 +61,16 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -75,23 +83,14 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
 #define QDMA_STATUS_SIZE		64
@@ -101,6 +100,7 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -118,89 +118,145 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
-	union {
-		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
-		};
-		__le64 data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
+#define FSL_QDMA_SG_MAX_ENTRY 64
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+	uint64_t phy_df;
+} __rte_packed;
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
-	struct fsl_qdma_format	*cq;
-	void			*block_base;
-	struct rte_dma_stats	stats;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	uint8_t channel_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
+	struct rte_dma_stats stats;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint16_t pending_num;
+	uint16_t complete_start;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
-struct fsl_qdma_comp {
-	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	int			index;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_chans;
-	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
-	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
+	uint32_t num_blocks;
+	int block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 09/15] dma/dpaa: support burst capacity API
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (7 preceding siblings ...)
  2024-10-08 10:58             ` [v5 08/15] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 10/15] dma/dpaa: add silent mode support Gagandeep Singh
                               ` (5 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch improves the dpaa qdma driver and adds the
dpaa_qdma_burst_capacity API, which returns the remaining
space in the descriptor ring.
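
For context, a minimal usage sketch of this capability through the
generic dmadev API, assuming an already configured device; dev_id,
vchan and the copy parameters below are illustrative, not part of
this patch:

  #include <errno.h>
  #include <rte_dmadev.h>

  /* Enqueue a copy only if the descriptor ring still has room; the
   * capacity query is served by dpaa_qdma_burst_capacity().
   */
  static int
  enqueue_if_room(int16_t dev_id, uint16_t vchan,
		rte_iova_t src, rte_iova_t dst, uint32_t len)
  {
	if (rte_dma_burst_capacity(dev_id, vchan) == 0)
		return -ENOSPC; /* ring full, retry later */

	return rte_dma_copy(dev_id, vchan, src, dst, len,
			    RTE_DMA_OP_FLAG_SUBMIT);
  }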

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a10a867580..94be9c5fd1 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1039,6 +1039,15 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	return fsl_queue->pending_max - fsl_queue->pending_num;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1152,6 +1161,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 10/15] dma/dpaa: add silent mode support
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (8 preceding siblings ...)
  2024-10-08 10:58             ` [v5 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                               ` (4 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add silent mode support. In silent mode the hardware consumes jobs
without reporting per-job completion status, so the application must
not poll for completions.
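
A minimal sketch of how an application might opt into this mode via
the generic dmadev API, assuming the device advertises the silent
capability (dev_id and the vchan count are illustrative):

  #include <rte_dmadev.h>

  static int
  configure_silent(int16_t dev_id)
  {
	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };

	if (rte_dma_info_get(dev_id, &info) < 0)
		return -1;
	if (info.dev_capa & RTE_DMA_CAPA_SILENT)
		conf.enable_silent = true;

	/* With enable_silent set, rte_dma_completed() must not be used;
	 * the driver rejects dequeue calls in silent mode.
	 */
	return rte_dma_configure(dev_id, &conf);
  }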

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 46 ++++++++++++++++++++++++++++++++----
 drivers/dma/dpaa/dpaa_qdma.h |  1 +
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 94be9c5fd1..02f8685c48 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -119,6 +119,7 @@ dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_comp_cmd_desc *ccdf;
@@ -173,7 +174,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
-
+		if (!fsl_qdma->is_silent)
+			ccdf->ser = 1;
 		ccdf->queue = queue->queue_id;
 	}
 	queue->ci = 0;
@@ -575,9 +577,12 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	uint16_t num)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
@@ -622,17 +627,34 @@ static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
 	int overflow = 0;
+	uint32_t reg;
 	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
-	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+	if (fsl_qdma->is_silent) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
@@ -877,10 +899,13 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -966,6 +991,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
+
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
 			fsl_queue->block_id);
 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
@@ -995,6 +1026,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
 
 	*has_error = false;
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 75c014f32f..9b69db517e 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -257,6 +257,7 @@ struct fsl_qdma_engine {
 	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 11/15] dma/dpaa: add workaround for ERR050757
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (9 preceding siblings ...)
  2024-10-08 10:58             ` [v5 10/15] dma/dpaa: add silent mode support Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                               ` (3 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions till the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and
the maximum read request is 256 bytes. The completion buffer
inside the controller needs to be at least 4 KB, but the PCIe
controller only has 3 KB of buffer. If the pending outbound read
transactions exceed 3 KB, the PCIe controller may drop the
incoming completions without notifying the initiator, leaving
those transactions unfinished, and all subsequent outbound reads
to PCIe are blocked permanently.
To avoid a qDMA hang while it waits for data that was silently
dropped, set stride mode for qDMA.
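
As a rough illustration of the workaround (the struct below is a
simplified stand-in for the relevant source-descriptor bits, not the
driver's fsl_qdma_sdf definition; the 128-byte stride matches
FSL_QDMA_CMD_SS_ERR050757_LEN from this patch):

  #include <stdint.h>

  #define ERR050757_STRIDE_LEN 128 /* FSL_QDMA_CMD_SS_ERR050757_LEN */

  struct sdf_stride {      /* stand-in for the ssen/sss/ssd bits */
	uint32_t ssen:1;  /* source stride enable */
	uint32_t sss:12;  /* source stride size */
	uint32_t ssd:12;  /* source stride distance */
  };

  /* Break long outbound PCIe reads into 128-byte strides so the
   * pending completions stay within the controller's 3 KB buffer.
   */
  static void
  set_err050757_stride(struct sdf_stride *sdf, uint32_t len)
  {
	if (len > ERR050757_STRIDE_LEN) {
		sdf->ssen = 1;
		sdf->sss = ERR050757_STRIDE_LEN;
		sdf->ssd = ERR050757_STRIDE_LEN;
	} else {
		sdf->ssen = 0;
		sdf->sss = 0;
		sdf->ssd = 0;
	}
  }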

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 38 +++++++++++++++++++++++++++++++++---
 drivers/dma/dpaa/dpaa_qdma.h | 19 +++++++-----------
 4 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 02f8685c48..026ba124e1 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,7 +167,6 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -449,8 +448,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 		}
 
@@ -694,6 +694,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
 	if (unlikely(ret))
@@ -701,6 +704,19 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -733,6 +749,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	uint32_t total_len;
 	uint16_t start, idx, num, i, next_idx;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 eq_sg:
 	total_len = 0;
@@ -798,6 +817,19 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 9b69db517e..171c093117 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -77,8 +77,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -88,18 +86,15 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
-#define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
 #define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 12/15] dma/dpaa: qdma stall workaround for ERR050265
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (10 preceding siblings ...)
  2024-10-08 10:58             ` [v5 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
                               ` (2 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers by
issuing fewer transactions and should be set whenever possible.
This patch also fixes a qDMA stall issue caused by unaligned
transactions.
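
A rough sketch of the workaround (the struct is a simplified stand-in
for the prefetch bit of the qDMA source descriptor, not the driver's
definition):

  #include <stdint.h>

  struct sdf_prefetch {
	uint32_t prefetch:1; /* prefetchable read enable */
  };

  static void
  apply_err050265(struct sdf_prefetch *sdf)
  {
  #ifdef RTE_DMA_DPAA_ERRATA_ERR050265
	/* Prefetchable reads: fewer transactions for misaligned
	 * transfers and avoids the stall described by this erratum.
	 */
	sdf->prefetch = 1;
  #else
	sdf->prefetch = 0;
  #endif
  }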

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 9 +++++++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 026ba124e1..b2e96432fb 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,6 +167,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -707,6 +710,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
@@ -820,6 +826,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 13/15] dma/dpaa: add Scatter Gather support
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (11 preceding siblings ...)
  2024-10-08 10:58             ` [v5 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
  2024-10-08 10:58             ` [v5 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Support the copy_sg operation for scatter-gather copies.
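
For reference, a minimal sketch of the generic rte_dma_copy_sg() call
shape that this operation serves (addresses and lengths are
illustrative; the driver requires nb_src == nb_dst with matching
per-entry lengths, and additionally derives a per-job index array
from the upper bits of the flags via DPAA_QDMA_IDXADDR_FROM_SG_FLAG,
which this generic sketch does not show):

  #include <rte_dmadev.h>

  static int
  sg_copy_example(int16_t dev_id, uint16_t vchan,
		rte_iova_t src_base, rte_iova_t dst_base)
  {
	struct rte_dma_sge src[2] = {
		{ .addr = src_base,        .length = 512 },
		{ .addr = src_base + 4096, .length = 512 },
	};
	struct rte_dma_sge dst[2] = {
		{ .addr = dst_base,        .length = 512 },
		{ .addr = dst_base + 4096, .length = 512 },
	};

	/* Source and destination entry counts and lengths must match. */
	return rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2,
			       RTE_DMA_OP_FLAG_SUBMIT);
  }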

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 55 ++++++++++++++++++++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h | 10 ++++++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index b2e96432fb..7c199b6dd0 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1021,6 +1021,60 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
@@ -1235,6 +1289,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 171c093117..1e820d0207 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -24,8 +24,13 @@
 #define QDMA_STATUS_REGION_OFFSET \
 	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
 #define QDMA_STATUS_REGION_SIZE 0x10000
-#define DPAA_QDMA_COPY_IDX_OFFSET 8
+
 #define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(DPAA_QDMA_COPY_IDX_OFFSET)
+#define DPAA_QDMA_SG_IDX_ADDR_MASK \
+	(DPAA_QDMA_SG_IDX_ADDR_ALIGN - 1)
 
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
@@ -194,6 +199,9 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
+
 #define DPAA_QDMA_IDX_FROM_FLAG(flag) \
 	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 14/15] dma/dpaa: add DMA error checks
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (12 preceding siblings ...)
  2024-10-08 10:58             ` [v5 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-08 10:58             ` [v5 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Sachin Saxena, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add user-configurable DMA error checks, enabled through the
dpaa_dma_err_check device argument.
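
A minimal sketch of how the errors surface to an application once the
check is enabled with the dpaa_dma_err_check=1 device argument (for
example dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1, as documented
below); dev_id and vchan are illustrative:

  #include <stdio.h>
  #include <stdbool.h>
  #include <rte_dmadev.h>

  static void
  drain_completions(int16_t dev_id, uint16_t vchan)
  {
	uint16_t last_idx = 0;
	bool has_error = false;
	uint16_t n;

	/* With error checking on, hardware errors are reported through
	 * has_error and counted in the queue's stats.errors.
	 */
	n = rte_dma_completed(dev_id, vchan, 32, &last_idx, &has_error);
	if (has_error)
		printf("%u jobs dequeued, HW reported an error\n", n);
  }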

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/dmadevs/dpaa.rst      |   6 ++
 drivers/dma/dpaa/dpaa_qdma.c     | 135 ++++++++++++++++++++++++++++++-
 drivers/dma/dpaa/dpaa_qdma.h     |  42 ++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.c |   2 +-
 4 files changed, 183 insertions(+), 2 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 8a7c0befc3..a60457229a 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -69,3 +69,9 @@ Platform Requirement
 
 DPAA DMA driver for DPDK can only work on NXP SoCs
 as listed in the `Supported DPAA SoCs`_.
+
+Device Arguments
+----------------
+
+Use dev arg option ``dpaa_dma_err_check=1`` to check DMA errors at
+driver level. usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1``
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 7c199b6dd0..7b9d893cbe 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,11 +4,15 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
 static uint32_t s_sg_max_entry_sz = 2000;
+static bool s_hw_err_check;
+
+#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -638,7 +642,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (fsl_qdma->is_silent) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -1076,13 +1080,81 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1107,6 +1179,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
+			fsl_queue->stats.errors++;
+	}
 
 	return dq_num;
 }
@@ -1117,7 +1195,9 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1138,6 +1218,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	for (i = 0; i < dq_num; i++)
 		last_idx[i] = desc_complete[i]->flag;
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
+			fsl_queue->stats.errors++;
+		}
+	}
+
 	return dq_num;
 }
 
@@ -1189,6 +1279,43 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.stats_reset		  = dpaa_qdma_stats_reset,
 };
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
@@ -1199,6 +1326,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
+		s_hw_err_check = true;
+		DPAA_QDMA_INFO("Enable DMA error checks");
+	}
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -1340,4 +1472,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 1e820d0207..91eaf1455a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -199,6 +199,48 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
 #define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index c411ad5a97..814aaf0996 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2682,7 +2682,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
 		dpaa2_enable_err_queue = 1;
-		DPAA2_PMD_INFO("Enable error queue");
+		DPAA2_PMD_INFO("Enable DMA error checks");
 	}
 
 	/* Allocate memory for hardware structure for queues */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v5 15/15] bus/dpaa: add port bmi stats
  2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                               ` (13 preceding siblings ...)
  2024-10-08 10:58             ` [v5 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
@ 2024-10-08 10:58             ` Gagandeep Singh
  2024-10-09 18:02               ` Stephen Hemminger
  14 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-08 10:58 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended statistics.
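
A minimal sketch of reading these counters from an application, using
the standard xstats API (port_id and buffer sizes are illustrative):

  #include <stdio.h>
  #include <inttypes.h>
  #include <rte_ethdev.h>

  static void
  dump_xstats(uint16_t port_id)
  {
	struct rte_eth_xstat xstats[128];
	struct rte_eth_xstat_name names[128];
	int i, n;

	n = rte_eth_xstats_get(port_id, xstats, RTE_DIM(xstats));
	if (n <= 0 || n > (int)RTE_DIM(xstats))
		return;
	if (rte_eth_xstats_get_names(port_id, names, n) != n)
		return;

	/* The new BMI counters appear here as rx_frame_counter,
	 * rx_bad_frames_count, etc.
	 */
	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
  }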

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index f918836ec2..1f120b7614 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 51f5422e0c..da4a64d99a 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discrad_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard ",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_diallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 0006bd33d4..1278623e7b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard nntr*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v5 15/15] bus/dpaa: add port bmi stats
  2024-10-08 10:58             ` [v5 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
@ 2024-10-09 18:02               ` Stephen Hemminger
  0 siblings, 0 replies; 165+ messages in thread
From: Stephen Hemminger @ 2024-10-09 18:02 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Hemant Agrawal, Sachin Saxena

On Tue,  8 Oct 2024 16:28:10 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> Add BMI statistics and fixing the existing extended
> statistics
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---

This patch is failing build, Fedora has a more recent Gcc with more checking


==== 20 line log output for Fedora 40 (dpdk_unit_test): ====
|                  ^~~
In function 'rte_mov15_or_less',
inlined from 'rte_memcpy_aligned' at ../lib/eal/x86/include/rte_memcpy.h:706:10,
inlined from 'rte_memcpy' at ../lib/eal/x86/include/rte_memcpy.h:755:10,
inlined from 'qdma_cntx_idx_ring_eq' at ../drivers/dma/dpaa2/dpaa2_qdma.c:80:3,
inlined from 'dpaa2_qdma_dq_fd' at ../drivers/dma/dpaa2/dpaa2_qdma.c:953:9,
inlined from 'dpaa2_qdma_dequeue' at ../drivers/dma/dpaa2/dpaa2_qdma.c:1109:9:
../lib/eal/x86/include/rte_memcpy.h:89:63: error: array subscript 'const struct rte_uint32_alias[0]' is partly outside array bounds of 'const void[2]' [-Werror=array-bounds=]
89 |                         ((const struct rte_uint32_alias *)src)->val;
|                         ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~
../drivers/dma/dpaa2/dpaa2_qdma.c: In function 'dpaa2_qdma_dequeue':
../drivers/dma/dpaa2/dpaa2_qdma.c:943:18: note: object 'idx' of size 2
943 |         uint16_t idx, att;
|                  ^~~
../drivers/dma/dpaa2/dpaa2_qdma.c:943:18: note: at offset [8, 10] into object 'idx' of size 2
cc1: all warnings being treated as errors
[1029/2944] Compiling C object 'drivers/a715181@@tmp_rte_dma_idxd@sta/dma_idxd_idxd_common.c.o'.
[1030/2944] Generating rte_common_sfc_efx.sym_chk with a meson_exe.py custom command.
[1031/2944] Generating rte_common_cnxk.sym_chk with a meson_exe.py custom command.
ninja: build stopped: subcommand failed.
==== End log output ====

^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 00/15] NXP DMA driver fixes and Enhancements
  2024-10-08 10:57             ` [v5 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-14  9:36               ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                                   ` (15 more replies)
  0 siblings, 16 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev

V6 changes:
* fix array bound compilation warning with GCC 11.4

V5 changes:
* typos and doc compilation fixed

V4 changes:
* rebased series to latest commit and patches reduced.

V3 changes:
* fix 32 bit compilation issue

V2 changes:
* fix compilation issue on ubuntu 22.04

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (14):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: refactor driver code
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: support burst capacity API
  dma/dpaa: add silent mode support
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: add Scatter Gather support
  dma/dpaa: add DMA error checks

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    9 +
 doc/guides/dmadevs/dpaa2.rst                  |   10 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1593 +++++++----
 drivers/dma/dpaa/dpaa_qdma.h                  |  292 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2436 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   83 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 29 files changed, 2890 insertions(+), 2297 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 01/15] dma/dpaa2: configure route by port by PCIe port param
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
                                   ` (14 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
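
A minimal sketch of how an application describes such a PCIe target
with the generic vchan configuration, which this patch maps onto the
qDMA route-by-port (rbp) fields (the ids below are illustrative):

  #include <rte_dmadev.h>

  static int
  setup_mem_to_pcie_vchan(int16_t dev_id, uint16_t vchan)
  {
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 64,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 0, /* -> rbp.dportid */
				.pfid = 0,   /* -> rbp.dpfid */
				.vfen = 1,   /* -> rbp.dvfa */
				.vfid = 2,   /* -> rbp.dvfid */
			},
		},
	};

	return rte_dma_vchan_setup(dev_id, vchan, &conf);
  }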

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5780e49297..5d4749eae3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.svfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index fc16517f7a..43e8b2d5c5 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1
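
With this patch, route-by-port over PCIe is configured through the
generic rte_dma_vchan_setup() port parameters instead of the removed
rte_dpaa2_qdma_vchan_rbp_enable(). A minimal sketch, assuming an
application-chosen device id, vchan and illustrative PCIe core/PF/VF
numbers:

#include <rte_dmadev.h>

/* Sketch: MEM_TO_DEV vchan whose destination is routed over PCIe.
 * The driver maps these generic port parameters onto its RBP fields
 * (dportid/dpfid/dvfa/dvfid). coreid/pfid/vfid values are illustrative.
 */
static int
setup_pcie_vchan(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 64,
		.dst_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = {
				.coreid = 1,	/* PCIe controller index */
				.pfid = 0,	/* physical function id */
				.vfen = 1,	/* enable VF addressing */
				.vfid = 2,	/* virtual function id */
			},
		},
	};

	return rte_dma_vchan_setup(dev_id, vchan, &conf);
}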


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 02/15] dma/dpaa2: refactor driver code
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-14  9:36                 ` [v6 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                                   ` (13 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Refactor the driver code with changes in:
- multiple HW queues
- DMA single copy and SG copy (see the data path sketch below)
- silent mode
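
A minimal sketch of the generic dmadev data path the refactored driver
serves (single copy, explicit submit, completion polling), using only
the generic rte_dma API; dev_id, vchan and the IOVA arguments are
illustrative assumptions supplied by the caller:

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

/* Sketch: enqueue one copy, submit it and poll for its completion
 * through the generic rte_dma_* API implemented by this driver.
 */
static int
do_one_copy(int16_t dev_id, uint16_t vchan,
	rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx = 0;
	bool has_error = false;
	int ret;

	ret = rte_dma_copy(dev_id, vchan, src, dst, len, 0);
	if (ret < 0)
		return ret;

	/* Passing RTE_DMA_OP_FLAG_SUBMIT in the copy flags would make
	 * this separate submit call unnecessary.
	 */
	ret = rte_dma_submit(dev_id, vchan);
	if (ret)
		return ret;

	/* Busy-poll until the single queued job completes. */
	while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
		;

	return has_error ? -EIO : 0;
}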

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |    8 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 2200 ++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  148 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  130 +-
 drivers/dma/dpaa2/version.map          |   13 -
 5 files changed, 1150 insertions(+), 1349 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index d2c26231e2..eeeb5d52a8 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -73,3 +73,11 @@ Platform Requirement
 
 DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
 ``Supported DPAA2 SoCs``.
+
+Device Arguments
+----------------
+* Use the devargs option ``fle_pre_populate=1`` to pre-populate all
+  DMA descriptors with pre-initialized values.
+  Usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
+* Use the devargs option ``desc_debug=1`` to enable descriptor debug logs.
+  Usage example: ``fslmc:dpdmai.1,desc_debug=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5d4749eae3..e364cc8f3d 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -14,220 +14,370 @@
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
-#define DPAA2_QDMA_PREFETCH "prefetch"
+#define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
+#define DPAA2_QDMA_DESC_DEBUG "desc_debug"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
-static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+static struct fsl_mc_io s_proc_mc_reg;
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (strcmp(value, "1"))
+		return -1;
 
-	fd->simple_pci.len_sl = len;
+	return 0;
+}
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+static int
+dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	if (!devargs)
+		return 0;
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
 
-	return 0;
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
 }
 
 static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	uint16_t i;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	return 0;
+	for (i = 0; i < nb; i++) {
+		ring->cntx_idx_ring[ring->tail] = elem[i];
+		ring->tail = (ring->tail + 1) &
+			(DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
+
+	if (free_space)
+		*free_space = ring->free_space;
+
+	return nb;
 }
 
-static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
+
+	if (!ret)
+		return 0;
+
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
 
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
+}
+
+static void
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
+{
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 {
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
+{
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i;
+	uint32_t total_len = 0;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = src[i].length;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
+
+	total_len += dst[i].length;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
+}
+
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
+{
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
 {
 	uint16_t i;
 	uint32_t total_len = 0;
-	uint64_t iova;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +385,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +395,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += src[i].length;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,325 +408,452 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
+
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
+	}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
 	}
 
-	return 0;
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected, i;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
+		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
+	}
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	*job = *ppjob;
-	(*job)->status = status;
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
+		}
 
-	return (*job)->vq_id;
-}
+		fle_post_populate(fle, src, dst, length);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, length,
+			QBMAN_FLE_WORD4_FMT_SBF);
+	}
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
-	}
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return job[0]->vq_id;
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -592,21 +862,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -615,7 +884,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -629,7 +898,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -662,563 +931,239 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
-
-		dq_storage++;
-		num_rx += num_rx_ret;
-	} while (pending);
-
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-	}
-	/* issue a volatile dequeue command for next pull */
-	while (1) {
-		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)");
-			continue;
-		}
-		break;
-	}
-
-	q_storage->active_dqs = dq_storage1;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &(dpdmai_dev->rx_queue[0]);
-
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
 		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
 		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
-{
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
+		dq_storage++;
+	} while (pending);
 
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+		while (!qbman_check_command_complete(
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			;
+		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	}
+	/* issue a volatile dequeue command for next pull */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
+			continue;
 		}
+		break;
 	}
 
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
+	q_storage->active_dqs = dq_storage1;
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	return ret;
-}
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	if (has_error)
+		*has_error = false;
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return num_rx;
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
-	}
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
 
-	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
-	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
-		return -ENOMEM;
+		return -ENOTSUP;
 	}
-	qdma_dev->num_vqs = dev_conf->nb_vchans;
-
-	return 0;
-}
-
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
 
-	return 0;
-}
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
 
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
 
-	if (!devargs)
-		return 0;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+	}
 
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
+	/* Allocate Virtual Queues */
+	qdma_dev->vqs = rte_zmalloc(NULL,
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
+		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 	}
-	rte_kvargs_free(kvlist);
 
-	return 1;
-}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	return 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
 
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
+	return ret;
 }
 
 static int
@@ -1257,16 +1202,14 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1276,99 +1219,67 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
-
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_FLE_PRE_POPULATE))
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 0;
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_DESC_DEBUG))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1377,11 +1288,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1390,46 +1307,71 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+			DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRId64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
 
-	/* Reset and free virtual queues */
+	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
 	}
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1438,18 +1380,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1504,123 +1436,44 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.",
+			dpdmai_dev->dpdmai_id);
+		return 0;
+	}
 
-	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	/* Close the device at underlying layer*/
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
 
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+		return ret;
 	}
 
-	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* Open DPDMAI device */
-	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
-		return ret;
-	}
-
-	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
-
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
-	}
-
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
-
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
 	if (!dpaa2_coherent_no_alloc_cache) {
 		if (dpaa2_svr_family == SVR_LX2160A) {
 			dpaa2_coherent_no_alloc_cache =
@@ -1635,24 +1488,76 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 		}
 	}
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
+	/* Open DPDMAI device */
+	dpdmai_dev->dpdmai_id = dpdmai_id;
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		return ret;
+	}
 
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
+	/* Get DPDMAI attributes */
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
+		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
+
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1662,8 +1567,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1673,10 +1578,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
@@ -1718,5 +1623,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
-	"no_prefetch=<int> ");
+	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
+	DPAA2_QDMA_DESC_DEBUG"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,11 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +16,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
-#define DPAA2_DPDMAI_MAX_QUEUES	1
-
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,12 +166,49 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -221,42 +222,30 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -266,18 +255,18 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/**Used for silent enabled*/
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -286,8 +275,7 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -7,118 +7,30 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index 43e8b2d5c5..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_25 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1



* [v6 03/15] bus/fslmc: enhance the qbman dq storage logic
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-14  9:36                 ` [v6 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-10-14  9:36                 ` [v6 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 04/15] dma/dpaa2: add short FD support Gagandeep Singh
                                   ` (12 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used among multiple cores; the single DQ
storage held in the first union member is leaked if multiple storages
are allocated. It does not make sense to keep that single DQ storage
in the union, so remove it and reuse the first entry of the multiple
storages for this case.
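
As an illustration only (a sketch, not part of the patch; the helper
names come from the hunks below and the surrounding driver context is
assumed), per-queue DQ storage is now handled through the new
dpaa2_queue_storage_alloc()/dpaa2_queue_storage_free() helpers rather
than open-coded rte_malloc/memset sequences, e.g.:

	/* Sketch: allocate one DQ storage slot for an ingress queue and
	 * release it again on failure; 'rxq' is assumed to point at a
	 * driver-owned struct dpaa2_queue.
	 */
	static int setup_rxq_storage(struct dpaa2_queue *rxq)
	{
		int ret = dpaa2_queue_storage_alloc(rxq, 1);

		if (ret) {
			dpaa2_queue_storage_free(rxq, 1);
			return ret;
		}
		return 0;
	}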

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 41 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 102 insertions(+), 140 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 7e858a113f..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 4c30e6db18..0e53ab9d8f 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -187,6 +189,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a1e8dffe4a..bd3ae9d9e3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1910,7 +1910,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -2001,10 +2001,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2055,18 +2052,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 883584a6e2..fb0408f8ad 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index e364cc8f3d..ee110741b7 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -854,7 +854,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1062,13 +1062,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1124,24 +1118,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1152,11 +1131,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1350,11 +1325,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 7b3e587a8d..93b88acef8 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 92e9dd40dc..376291af04 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index de8c024abb..34a3c4f6af 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 04/15] dma/dpaa2: add short FD support
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (2 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                                   ` (11 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A short FD can be used for the single-transfer scenario and shows
higher performance than the FLE format.
1) Save the index context in the FD att field for both short and
FLE (non-SG) formats.
2) Identify the FD type from the att field of the FD (see the sketch below).
3) Force 48-bit addresses for the source address and the FLE, as
required by the spec.
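
As a rough illustration only (the helper names and the 13-bit width mirror
this patch, but the snippet is not driver code), the att field packs the FD
type in the bits above the job index:

	#include <stdint.h>

	#define ATT_TYPE_OFFSET 13	/* index occupies the low 13 bits */

	static inline uint16_t att_pack(uint16_t job_idx, uint16_t type)
	{
		/* type lands in the bits above the index */
		return job_idx | (uint16_t)(type << ATT_TYPE_OFFSET);
	}

	static inline uint16_t att_type(uint16_t att)
	{
		return att >> ATT_TYPE_OFFSET;
	}

	static inline uint16_t att_idx(uint16_t att)
	{
		return att & ((1 << ATT_TYPE_OFFSET) - 1);
	}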

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |   2 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 4 files changed, 286 insertions(+), 112 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index eeeb5d52a8..6ebf7ac030 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -81,3 +81,5 @@ Device Arguments
   usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
 * Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
   usage example: ``fslmc:dpdmai.1,desc_debug=1``
+* Use dev arg option ``short_fd=1`` to enable short FDs.
+  usage example: ``fslmc:dpdmai.1,short_fd=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index ee110741b7..df52d2d6b3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,6 +16,7 @@
 
 #define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
 #define DPAA2_QDMA_DESC_DEBUG "desc_debug"
+#define DPAA2_QDMA_USING_SHORT_FD "short_fd"
 
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
@@ -552,7 +553,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -575,11 +575,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -590,11 +587,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -612,6 +604,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -674,7 +668,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -710,6 +704,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -726,74 +721,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
-
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -801,15 +900,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -829,10 +1002,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -931,25 +1100,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -974,8 +1126,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1204,11 +1358,14 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_USING_SHORT_FD))
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 0;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1228,7 +1385,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
@@ -1595,5 +1752,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
 	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
-	DPAA2_QDMA_DESC_DEBUG"=<int>");
+	DPAA2_QDMA_DESC_DEBUG"=<int>"
+	DPAA2_QDMA_USING_SHORT_FD"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 05/15] dma/dpaa2: limit the max descriptor number
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (3 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 04/15] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                                   ` (10 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD with a width of
DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits.

The max descriptor number of the ring must be a power of 2, so the
eventual maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
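
A quick, illustrative check of the resulting limit (not part of the patch;
macro names mirror the driver):

	#include <assert.h>

	#define FD_ATT_TYPE_OFFSET 13
	#define FD_ATT_MAX_IDX ((1 << FD_ATT_TYPE_OFFSET) - 1)	/* 8191 */
	#define MAX_DESC ((FD_ATT_MAX_IDX + 1) / 2)		/* 4096 */

	static_assert(MAX_DESC == 4096,
		"13-bit att index keeps the previous 4096-descriptor limit");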

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 06/15] dma/dpaa2: change the DMA copy return value
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (4 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                                   ` (9 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

The return value of the DMA copy/SG copy operations should be the
index of the copied descriptor on success.
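
From the caller's point of view, a successful enqueue therefore no longer
returns 0; a minimal, illustrative sketch (dev_id, vchan, src, dst and len
are placeholders the application already owns):

	#include <rte_dmadev.h>

	int ret = rte_dma_copy(dev_id, vchan, src, dst, len,
			       RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;	/* enqueue failed */
	/* ret is the 16-bit ring index of this copy; it wraps at UINT16_MAX
	 * and can be matched against indexes reported by rte_dma_completed().
	 */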

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index df52d2d6b3..5e7640ae08 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -636,6 +636,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -712,10 +717,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 07/15] dma/dpaa2: move the qdma header to common place
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (5 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 08/15] dma/dpaa: refactor driver Gagandeep Singh
                                   ` (8 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change code accordingly.
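
For applications the visible change is only the include path and the macro
prefix; an illustrative before/after (job_idx is a placeholder, and
rte_dmadev.h is assumed for RTE_DMA_OP_FLAG_SUBMIT):

	#include <rte_dmadev.h>
	#include <rte_pmd_dpaax_qdma.h>	/* was: rte_pmd_dpaa2_qdma.h */

	uint64_t flags = RTE_DPAAX_QDMA_COPY_SUBMIT(job_idx,
				RTE_DMA_OP_FLAG_SUBMIT);
	/* previously: RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx, ...) */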

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index f9f0300126..5a4411eb4a 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5e7640ae08..71e9ffdfc1 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -243,16 +243,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -266,23 +266,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -381,21 +379,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -475,17 +471,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -591,7 +586,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -641,9 +636,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -662,11 +657,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -698,8 +689,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1042,7 +1032,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1077,7 +1067,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1109,7 +1099,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1162,11 +1152,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1347,6 +1337,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	uint32_t pool_size;
 	char pool_name[64];
 	int ret;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1382,6 +1373,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 08/15] dma/dpaa: refactor driver
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (6 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-15  2:59                   ` Stephen Hemminger
  2024-10-14  9:36                 ` [v6 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
                                   ` (7 subsequent siblings)
  15 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch refactors the DPAA DMA driver code with the following changes:
 - rename the HW descriptors and update them with details.
 - update the qdma engine and queue structures.
 - use rte_ring APIs for enqueue and dequeue (a usage sketch follows below).
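
A minimal sketch of the single-producer/single-consumer rte_ring pattern the
refactor adopts for tracking descriptors (illustrative only; the ring size
and names are placeholders, not the driver's):

	#include <errno.h>
	#include <rte_lcore.h>
	#include <rte_ring.h>

	static int completion_ring_demo(void *desc)
	{
		void *done;
		struct rte_ring *r;

		r = rte_ring_create("cmpl_ring", 1024, rte_socket_id(),
				    RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (r == NULL)
			return -ENOMEM;

		/* producer side: record a completed descriptor */
		if (rte_ring_enqueue(r, desc) != 0) {
			rte_ring_free(r);
			return -ENOBUFS;
		}

		/* consumer side: drain and handle completions */
		while (rte_ring_dequeue(r, &done) == 0)
			; /* hand 'done' back to the application here */

		rte_ring_free(r);
		return 0;
	}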

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 1330 ++++++++++++++++++----------------
 drivers/dma/dpaa/dpaa_qdma.h |  222 +++---
 2 files changed, 864 insertions(+), 688 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 3d4fd818f8..a10a867580 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -8,69 +8,71 @@
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static uint32_t s_sg_max_entry_sz = 2000;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+static inline void
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	return ccdf->cfg8b_w1 & 0xff;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
+	uint8_t *queue_idx)
 {
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
 
-static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
+	return false;
 }
 
 static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+ilog2(int x)
 {
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
+	int log = 0;
 
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
-{
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
+	x >>= 1;
+
+	while (x) {
+		log++;
+		x >>= 1;
+	}
+	return log;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qsize(uint32_t q_size)
 {
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
+	return (ilog2(q_size) - ilog2(64));
 }
 
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qthld(uint32_t q_thld)
 {
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+	return (ilog2(q_thld) - ilog2(16));
 }
 
 static inline int
-ilog2(int x)
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
 {
-	int log = 0;
-
-	x >>= 1;
+	struct rte_dma_stats *stats = &fsl_queue->stats;
 
-	while (x) {
-		log++;
-		x >>= 1;
-	}
-	return log;
+	return (stats->submitted - stats->completed);
 }
 
 static u32
@@ -97,12 +99,12 @@ qdma_writel_be(u32 val, void *addr)
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -111,268 +113,221 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_comp_cmd_desc *ccdf;
+	uint16_t i, j;
+	struct fsl_qdma_cmpd_ft *ft;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0;
+
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
+			goto fail;
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
-
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
+
+		ft = queue->ft[i];
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
-
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
+
+		ccdf = &queue->cq[i];
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
+
+		ccdf->queue = queue->queue_id;
 	}
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
+	for (j = 0; j < i; j++)
+		rte_free(queue->ft[j]);
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static int
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *cmd_queue;
+	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
+
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
+
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
+		return -ENOMEM;
 	}
 
-	return NULL;
-}
-
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
-		return NULL;
+	cmd_queue->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
 
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
-	}
-	return queue_head;
+	return 0;
+}
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
-	}
-	rte_free(queue_head);
+static void
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->ft);
+	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
+}
 
-	return NULL;
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+static int
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	struct fsl_qdma_status_queue *status;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.");
-		return NULL;
-	}
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
-
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	status->cq = dma_pool_alloc(NULL, status_size,
+		status_size, &status->bus_addr);
+
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
-	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
+	return 0;
 }
 
 static int
@@ -420,59 +375,41 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
-				 enum rte_dma_status_code *status)
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
-	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
-	int count = 0;
-
-	while (count < nb_cpls) {
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
-
-		status_addr = fsl_status->virt_head;
-
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
-		count++;
-
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!",
+					err_msg);
+				return;
+			}
+		}
 	}
-	return count;
 }
 
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
 	u32 i, j;
@@ -489,8 +426,8 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -531,18 +468,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Disable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -551,7 +485,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -563,158 +497,389 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
 {
-	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
 
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret)
+				rte_panic("DQ desc number failed!\n");
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("DQ %d descs failed!(%d)\n",
+					*dq_complete, ret);
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				rte_panic("Failed desc eq %d!=%d to %s\n",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	uint16_t num)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
-	u32 reg;
+	uint16_t i, idx, start, dq;
+	int ret, dq_cnt;
+
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
-		return -1;
-
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
-	fsl_queue->virt_head++;
-
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-		fsl_queue->virt_head = fsl_queue->cq;
-
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->stats.submitted++;
-	} else {
-		fsl_queue->pending++;
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			rte_panic("Descriptors eq failed!\r\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint16_t blk_drain, check_num, drain_num;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
 	}
-	return fsl_comp->index;
+	goto overflow_check;
+
+	return 0;
 }
 
 static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
 
-	if (fsl_queue->count++)
-		goto finally;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
+	ft = fsl_queue->ft[fsl_queue->ci];
 
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor");
-		goto exit;
-	}
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-finally:
-	return fsl_qdma->desc_allocated++;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
 
-exit:
-	return -ENOMEM;
+	return 0;
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len;
+	uint16_t start, idx, num, i, next_idx;
+	int ret;
 
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
 
-	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
+	}
+
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 {
-	u32 i, start, end;
+	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
 
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+static int
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
+	return 0;
+}
+
+static int
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
+{
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
 		}
 	}
 
-	return -1;
-}
+queue_found:
+	if (!found)
+		return -ENXIO;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	if (fsl_queue->used)
+		return 0;
+
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
+	if (ret)
+		return ret;
+
+	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
+
+	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -745,148 +910,112 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	u32 reg;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	void *fsl_comp = NULL;
-
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL");
-		return -1;
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
+		return -ENOSPC;
 	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
 
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
+
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
-	fsl_queue->stats.completed += intr;
 
-	return intr;
+	return dq_num;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
-	}
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
-	fsl_queue->stats.completed += intr;
+	*has_error = false;
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
 
-	return intr;
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	return dq_num;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -903,17 +1032,15 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
@@ -926,90 +1053,80 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	uint32_t i, j, k;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d", phys_addr, regs_size);
-		goto err;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
-	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
+	k = 0;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
+				goto mem_free;
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
+			k++;
+		}
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
-	rte_free(fsl_qdma->status);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
+
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	return -1;
+	munmap(fsl_qdma->ctrl_base, regs_size);
+
+	return ret;
 }
 
 static int
@@ -1052,17 +1169,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, j, regs_size;
+
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
 	}
 
-	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..75c014f32f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,14 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,15 +61,16 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -75,23 +83,14 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
 #define QDMA_STATUS_SIZE		64
@@ -101,6 +100,7 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -118,89 +118,145 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
-	union {
-		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
-		};
-		__le64 data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
+#define FSL_QDMA_SG_MAX_ENTRY 64
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+	uint64_t phy_df;
+} __rte_packed;
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
-	struct fsl_qdma_format	*cq;
-	void			*block_base;
-	struct rte_dma_stats	stats;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	uint8_t channel_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
+	struct rte_dma_stats stats;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint16_t pending_num;
+	uint16_t complete_start;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
-struct fsl_qdma_comp {
-	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	int			index;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_chans;
-	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
-	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
+	uint32_t num_blocks;
+	int block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 09/15] dma/dpaa: support burst capacity API
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (7 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 08/15] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 10/15] dma/dpaa: add silent mode support Gagandeep Singh
                                   ` (6 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch adds the dpaa_qdma_burst_capacity callback to the dpaa
qdma driver. It returns the remaining space in the descriptor ring
of a virtual channel (pending_max - pending_num).
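
For reference, a minimal usage sketch of the generic API this
callback backs (dev_id, vchan and the job arrays are assumptions,
not part of this patch):

/* Hedged sketch: check remaining ring space before enqueueing. */
#include <errno.h>
#include <rte_dmadev.h>

static int
enqueue_if_room(int16_t dev_id, uint16_t vchan, uint16_t nb_jobs,
		const rte_iova_t *src, const rte_iova_t *dst,
		const uint32_t *len)
{
	uint16_t i;

	/* Backed by dpaa_qdma_burst_capacity(): pending_max - pending_num */
	if (rte_dma_burst_capacity(dev_id, vchan) < nb_jobs)
		return -ENOSPC;

	for (i = 0; i < nb_jobs; i++) {
		if (rte_dma_copy(dev_id, vchan, src[i], dst[i], len[i], 0) < 0)
			return -EIO;
	}
	return rte_dma_submit(dev_id, vchan);
}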

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index a10a867580..94be9c5fd1 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1039,6 +1039,15 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	return fsl_queue->pending_max - fsl_queue->pending_num;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1152,6 +1161,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 10/15] dma/dpaa: add silent mode support
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (8 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                                   ` (5 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add silent mode support. When the application enables silent mode
(rte_dma_conf.enable_silent), completion status entries are not
requested from the hardware and the software completion rings are
bypassed; the dequeue callbacks return 0, and enqueue relies on the
queue status register to detect overflow.
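
A minimal sketch of how an application could request silent mode at
configure time (device id and vchan count are assumptions):

/* Hedged sketch: enable silent mode only if the device advertises it. */
#include <stdbool.h>
#include <rte_dmadev.h>

static int
configure_silent(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };

	if (rte_dma_info_get(dev_id, &info) < 0)
		return -1;
	if (info.dev_capa & RTE_DMA_CAPA_SILENT)
		conf.enable_silent = true;

	return rte_dma_configure(dev_id, &conf);
}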

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 46 ++++++++++++++++++++++++++++++++----
 drivers/dma/dpaa/dpaa_qdma.h |  1 +
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 94be9c5fd1..02f8685c48 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -119,6 +119,7 @@ dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_comp_cmd_desc *ccdf;
@@ -173,7 +174,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
-
+		if (!fsl_qdma->is_silent)
+			ccdf->ser = 1;
 		ccdf->queue = queue->queue_id;
 	}
 	queue->ci = 0;
@@ -575,9 +577,12 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	uint16_t num)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
@@ -622,17 +627,34 @@ static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
 	int overflow = 0;
+	uint32_t reg;
 	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
-	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+	if (fsl_qdma->is_silent) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
@@ -877,10 +899,13 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -966,6 +991,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
+
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
 			fsl_queue->block_id);
 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
@@ -995,6 +1026,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
 
 	*has_error = false;
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 75c014f32f..9b69db517e 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -257,6 +257,7 @@ struct fsl_qdma_engine {
 	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 11/15] dma/dpaa: add workaround for ERR050757
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (9 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 10/15] dma/dpaa: add silent mode support Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes. The completion buffer inside the
controller needs to be at least 4 KB, but the PCIe controller only
has 3 KB of buffer. If the pending outbound read transactions exceed
3 KB, the PCIe controller may drop the incoming completions without
notifying the initiator, leaving the transactions unfinished; all
subsequent outbound reads to PCIe are then blocked permanently.
To avoid a qDMA hang while it waits for data that was silently
dropped, set stride mode for qDMA so that each source read stays
within the completion-buffer budget.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 38 +++++++++++++++++++++++++++++++++---
 drivers/dma/dpaa/dpaa_qdma.h | 19 +++++++-----------
 4 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index 012935d5d7..f81e466318 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -468,7 +468,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 02f8685c48..026ba124e1 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,7 +167,6 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -449,8 +448,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 		}
 
@@ -694,6 +694,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
 	if (unlikely(ret))
@@ -701,6 +704,19 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -733,6 +749,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	uint32_t total_len;
 	uint16_t start, idx, num, i, next_idx;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 eq_sg:
 	total_len = 0;
@@ -798,6 +817,19 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 9b69db517e..171c093117 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -77,8 +77,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -88,18 +86,15 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
-#define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
 #define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 12/15] dma/dpaa: qdma stall workaround for ERR050265
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (10 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
                                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers, in the
form of fewer transactions, and should be set whenever possible.
This patch also fixes a qDMA stall issue caused by unaligned
transactions.
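
A condensed sketch of the guarded change (the helper name is
illustrative; only the prefetch bit comes from this patch, and the
struct is defined earlier in this series):

static void
set_src_prefetch(struct fsl_qdma_sdf *sdf)
{
#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
	/* Prefetchable reads avoid the stall on misaligned transfers. */
	sdf->prefetch = 1;
#else
	/* Keep the non-prefetchable default for non-local-memory targets. */
	(void)sdf;
#endif
}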

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 9 +++++++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index f81e466318..f63ef41130 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -469,7 +469,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 026ba124e1..b2e96432fb 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,6 +167,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -707,6 +710,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
@@ -820,6 +826,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 13/15] dma/dpaa: add Scatter Gather support
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (11 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
                                   ` (2 subsequent siblings)
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Support the copy_sg operation for scatter-gather copies.
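
For reference, this hooks into the generic dmadev scatter-gather entry
point. Below is a minimal, illustrative sketch of the API shape only,
with hypothetical IOVAs and device/vchan ids; note from the code that
this PMD requires nb_src == nb_dst with matching per-entry lengths and
derives a per-job context-index array from the upper bits of the flags
argument, so the exact flag convention remains PMD specific.

    #include <rte_dmadev.h>

    /* Two equally sized segments copied as one SG job. */
    struct rte_dma_sge src[2] = {
        { .addr = src_iova0, .length = 4096 },
        { .addr = src_iova1, .length = 4096 },
    };
    struct rte_dma_sge dst[2] = {
        { .addr = dst_iova0, .length = 4096 },
        { .addr = dst_iova1, .length = 4096 },
    };

    /* Enqueue and submit immediately; returns a job index or < 0. */
    int idx = rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2,
                              RTE_DMA_OP_FLAG_SUBMIT);
    if (idx < 0) {
        /* handle enqueue failure (queue full or invalid segments) */
    }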

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 55 ++++++++++++++++++++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h | 10 ++++++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index b2e96432fb..7c199b6dd0 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1021,6 +1021,60 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
@@ -1235,6 +1289,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 171c093117..1e820d0207 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -24,8 +24,13 @@
 #define QDMA_STATUS_REGION_OFFSET \
 	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
 #define QDMA_STATUS_REGION_SIZE 0x10000
-#define DPAA_QDMA_COPY_IDX_OFFSET 8
+
 #define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(DPAA_QDMA_COPY_IDX_OFFSET)
+#define DPAA_QDMA_SG_IDX_ADDR_MASK \
+	(DPAA_QDMA_SG_IDX_ADDR_ALIGN - 1)
 
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
@@ -194,6 +199,9 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
+
 #define DPAA_QDMA_IDX_FROM_FLAG(flag) \
 	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 14/15] dma/dpaa: add DMA error checks
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (12 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-14  9:36                 ` [v6 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Sachin Saxena, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add user-configurable DMA error checks, enabled via a device argument.
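
With the check enabled (devargs "dpaa_dma_err_check=1"), detected
errors are logged, counted in the vchan stats and signalled through
the normal completion path. A minimal application-side sketch of
observing them (hypothetical dev_id/vchan, burst size chosen only for
illustration):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_dmadev.h>

    uint16_t last_idx;
    bool has_error = false;
    struct rte_dma_stats stats;

    /* Reap up to 32 completions; has_error is set when the driver
     * detected an error in the QDMA error registers.
     */
    uint16_t n = rte_dma_completed(dev_id, vchan, 32, &last_idx,
                                   &has_error);

    if (has_error) {
        /* The per-vchan error counter is also incremented. */
        rte_dma_stats_get(dev_id, vchan, &stats);
        printf("completed %u jobs, %" PRIu64 " errors so far\n",
               n, stats.errors);
    }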

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/dmadevs/dpaa.rst      |   6 ++
 drivers/dma/dpaa/dpaa_qdma.c     | 135 ++++++++++++++++++++++++++++++-
 drivers/dma/dpaa/dpaa_qdma.h     |  42 ++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.c |   2 +-
 4 files changed, 183 insertions(+), 2 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 8a7c0befc3..a60457229a 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -69,3 +69,9 @@ Platform Requirement
 
 DPAA DMA driver for DPDK can only work on NXP SoCs
 as listed in the `Supported DPAA SoCs`_.
+
+Device Arguments
+----------------
+
+Use dev arg option ``dpaa_dma_err_check=1`` to check DMA errors at
+driver level. usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1``
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 7c199b6dd0..7b9d893cbe 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,11 +4,15 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
 static uint32_t s_sg_max_entry_sz = 2000;
+static bool s_hw_err_check;
+
+#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -638,7 +642,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (fsl_qdma->is_silent) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -1076,13 +1080,81 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1107,6 +1179,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
+			fsl_queue->stats.errors++;
+	}
 
 	return dq_num;
 }
@@ -1117,7 +1195,9 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1138,6 +1218,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	for (i = 0; i < dq_num; i++)
 		last_idx[i] = desc_complete[i]->flag;
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
+			fsl_queue->stats.errors++;
+		}
+	}
+
 	return dq_num;
 }
 
@@ -1189,6 +1279,43 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.stats_reset		  = dpaa_qdma_stats_reset,
 };
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
@@ -1199,6 +1326,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
+		s_hw_err_check = true;
+		DPAA_QDMA_INFO("Enable DMA error checks");
+	}
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -1340,4 +1472,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 1e820d0207..91eaf1455a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -199,6 +199,48 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
 #define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 93b88acef8..408418f032 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2685,7 +2685,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
 		dpaa2_enable_err_queue = 1;
-		DPAA2_PMD_INFO("Enable error queue");
+		DPAA2_PMD_INFO("Enable DMA error checks");
 	}
 
 	/* Allocate memory for hardware structure for queues */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v6 15/15] bus/dpaa: add port bmi stats
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (13 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
@ 2024-10-14  9:36                 ` Gagandeep Singh
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  15 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-14  9:36 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended statistics.
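
The BMI counters are exposed as additional per-port xstats, so they
can be read with the standard ethdev extended-statistics API. A
minimal sketch (hypothetical port_id; array size chosen only for
illustration):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    struct rte_eth_xstat_name names[64];
    struct rte_eth_xstat xstats[64];
    int n = rte_eth_xstats_get_names(port_id, names, 64);

    if (n > 0 && n <= 64 && rte_eth_xstats_get(port_id, xstats, n) == n) {
        for (int i = 0; i < n; i++) {
            /* The new counters appear with the rx_* names added
             * to dpaa_xstats_strings[] in this patch.
             */
            printf("%s: %" PRIu64 "\n",
                   names[xstats[i].id].name, xstats[i].value);
        }
    }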

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index f918836ec2..1f120b7614 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 51f5422e0c..da4a64d99a 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discrad_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard ",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_diallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 0006bd33d4..1278623e7b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard nntr*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v6 08/15] dma/dpaa: refactor driver
  2024-10-14  9:36                 ` [v6 08/15] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-10-15  2:59                   ` Stephen Hemminger
  0 siblings, 0 replies; 165+ messages in thread
From: Stephen Hemminger @ 2024-10-15  2:59 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Sachin Saxena, Jun Yang

On Mon, 14 Oct 2024 15:06:32 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> @@ -551,7 +485,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
>  
>  		/* Initialize the status queue mode. */
>  		reg = FSL_QDMA_BSQMR_EN;
> -		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
> +		val = ilog2_qsize(temp_stat->n_cq);
>  		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
>  		qdma_writel(reg, block + FSL_QDMA_BSQMR);
>  	}
> @@ -563,158 +497,389 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
>  	return 0;
>  }
>  
> -static void *
> -fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
> -			   dma_addr_t src, size_t len,
> -			   void *call_back,
> -			   void *param)
> +static uint16_t
> +dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
> +	uint8_t block_id)
>  {
> -	struct fsl_qdma_comp *fsl_comp;
> +	struct fsl_qdma_status_queue *stat_queue;
> +	struct fsl_qdma_queue *cmd_queue;
> +	struct fsl_qdma_comp_cmd_desc *cq;
> +	uint16_t start, count = 0;
> +	uint8_t qid = 0;
> +	uint32_t reg;
> +	int ret;
> +	uint8_t *block;
> +	uint16_t *dq_complete;
> +	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
>  
> -	fsl_comp =
> -	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
> -	if (!fsl_comp)
> -		return NULL;
> +	stat_queue = &fsl_qdma->stat_queues[block_id];
> +	cq = stat_queue->cq;
> +	start = stat_queue->complete;
> +
> +	block = fsl_qdma->block_base +
> +		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
>  
> -	fsl_comp->qchan = fsl_chan;
> -	fsl_comp->call_back_func = call_back;
> -	fsl_comp->params = param;
> +	do {
> +		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
> +		if (reg & FSL_QDMA_BSQSR_QE_BE)
> +			break;
>  
> -	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
> -	return (void *)fsl_comp;
> +		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
> +		ret = qdma_ccdf_get_queue(&cq[start], &qid);
> +		if (ret == true) {
> +			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
> +
> +			ret = rte_ring_dequeue(cmd_queue->complete_burst,
> +				(void **)&dq_complete);
> +			if (ret)
> +				rte_panic("DQ desc number failed!\n");

Please don't panic here, either recover, log an error or take the device
offline. Killing the whole application is not acceptable.
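
One possible way to recover instead of panicking, sketched against the
names used in the hunk above (not necessarily the change adopted in
v7), is to log the inconsistency and stop draining the block:

    ret = rte_ring_dequeue(cmd_queue->complete_burst,
            (void **)&dq_complete);
    if (unlikely(ret)) {
        /* Completion ring out of sync with the status queue:
         * report it and stop dequeuing this block rather than
         * aborting the whole process.
         */
        DPAA_QDMA_ERR("No completion context on block%d queue%d",
                block_id, qid);
        break;
    }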

^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 00/15] NXP DMA driver fixes and Enhancements
  2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                   ` (14 preceding siblings ...)
  2024-10-14  9:36                 ` [v6 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
@ 2024-10-15  7:13                 ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
                                     ` (14 more replies)
  15 siblings, 15 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev

V7 changes:
* remove rte_panic from driver code.

V6 changes:
* fix array bound compilation warning with GCC 11.4

V5 changes:
* typos and doc compilation fixed

V4 changes:
* rebased series to latest commit and patches reduced.

V3 changes:
* fix 32 bit compilation issue

V2 changes:
* fix compilation issue on ubuntu 22.04

Hemant Agrawal (1):
  bus/dpaa: add port bmi stats

Jun Yang (14):
  dma/dpaa2: configure route by port by PCIe port param
  dma/dpaa2: refactor driver code
  bus/fslmc: enhance the qbman dq storage logic
  dma/dpaa2: add short FD support
  dma/dpaa2: limit the max descriptor number
  dma/dpaa2: change the DMA copy return value
  dma/dpaa2: move the qdma header to common place
  dma/dpaa: refactor driver
  dma/dpaa: support burst capacity API
  dma/dpaa: add silent mode support
  dma/dpaa: add workaround for ERR050757
  dma/dpaa: qdma stall workaround for ERR050265
  dma/dpaa: add Scatter Gather support
  dma/dpaa: add DMA error checks

 config/arm/meson.build                        |    4 +-
 doc/api/doxy-api-index.md                     |    2 +-
 doc/api/doxy-api.conf.in                      |    2 +-
 doc/guides/dmadevs/dpaa.rst                   |    9 +
 doc/guides/dmadevs/dpaa2.rst                  |   10 +
 drivers/bus/dpaa/base/fman/fman_hw.c          |   65 +-
 drivers/bus/dpaa/include/fman.h               |    4 +-
 drivers/bus/dpaa/include/fsl_fman.h           |   12 +
 drivers/bus/dpaa/version.map                  |    4 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c      |   25 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c      |    7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h       |   38 +-
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |   29 +-
 drivers/common/dpaax/meson.build              |    3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h     |   23 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c   |   23 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c   |    4 +-
 drivers/dma/dpaa/dpaa_qdma.c                  | 1597 +++++++----
 drivers/dma/dpaa/dpaa_qdma.h                  |  292 +-
 drivers/dma/dpaa2/dpaa2_qdma.c                | 2436 +++++++++--------
 drivers/dma/dpaa2/dpaa2_qdma.h                |  243 +-
 drivers/dma/dpaa2/meson.build                 |    4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        |  177 --
 drivers/dma/dpaa2/version.map                 |   14 -
 drivers/net/dpaa/dpaa_ethdev.c                |   46 +-
 drivers/net/dpaa/dpaa_ethdev.h                |   12 +
 drivers/net/dpaa2/dpaa2_ethdev.c              |   83 +-
 drivers/net/dpaa2/dpaa2_rxtx.c                |   19 +-
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c         |    4 +-
 29 files changed, 2894 insertions(+), 2297 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
 delete mode 100644 drivers/dma/dpaa2/version.map

-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 01/15] dma/dpaa2: configure route by port by PCIe port param
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
                                     ` (13 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

struct {
	uint64_t coreid : 4; /**--rbp.sportid / rbp.dportid*/
	uint64_t pfid : 8; /**--rbp.spfid / rbp.dpfid*/
	uint64_t vfen : 1; /**--rbp.svfa / rbp.dvfa*/
	uint64_t vfid : 16; /**--rbp.svfid / rbp.dvfid*/
	.....
} pcie;
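
These PCIe port parameters are taken from the generic vchan
configuration (struct rte_dma_port_param) instead of the removed
rte_dpaa2_qdma_vchan_rbp_enable() API. A minimal sketch of setting up
a mem-to-PCIe channel this way (hypothetical device id and PCIe ids):

    #include <rte_dmadev.h>

    struct rte_dma_vchan_conf conf = {
        .direction = RTE_DMA_DIR_MEM_TO_DEV,
        .nb_desc = 256,
        .dst_port = {
            .port_type = RTE_DMA_PORT_PCIE,
            .pcie = {
                .coreid = 0, /* -> rbp.dportid */
                .pfid = 0,   /* -> rbp.dpfid */
                .vfen = 1,   /* -> rbp.dvfa */
                .vfid = 2,   /* -> rbp.dvfid */
            },
        },
    };

    int ret = rte_dma_vchan_setup(dev_id, vchan, &conf);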

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  | 29 ++++++---
 drivers/dma/dpaa2/dpaa2_qdma.c                | 59 +++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h                | 38 +++++++++++-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h        | 55 +----------------
 drivers/dma/dpaa2/version.map                 |  1 -
 5 files changed, 100 insertions(+), 82 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48ffb1b46e..7528b610e1 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Copyright 2017-2019 NXP
+ * Copyright 2017-2024 NXP
  *
  */
 #ifndef _FSL_QBMAN_BASE_H
@@ -141,12 +141,23 @@ struct qbman_fd {
 			uint32_t saddr_hi;
 
 			uint32_t len_sl:18;
-			uint32_t rsv1:14;
-
+			uint32_t rsv13:2;
+			uint32_t svfid:6;
+			uint32_t rsv12:2;
+			uint32_t spfid:2;
+			uint32_t rsv1:2;
 			uint32_t sportid:4;
-			uint32_t rsv2:22;
+			uint32_t rsv2:1;
+			uint32_t sca:1;
+			uint32_t sat:2;
+			uint32_t sattr:3;
+			uint32_t svfa:1;
+			uint32_t stc:3;
 			uint32_t bmt:1;
-			uint32_t rsv3:1;
+			uint32_t dvfid:6;
+			uint32_t rsv3:2;
+			uint32_t dpfid:2;
+			uint32_t rsv31:2;
 			uint32_t fmt:2;
 			uint32_t sl:1;
 			uint32_t rsv4:1;
@@ -154,12 +165,14 @@ struct qbman_fd {
 			uint32_t acc_err:4;
 			uint32_t rsv5:4;
 			uint32_t ser:1;
-			uint32_t rsv6:3;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
 			uint32_t wrttype:4;
 			uint32_t dqos:3;
 			uint32_t drbp:1;
 			uint32_t dlwc:2;
-			uint32_t rsv7:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
 			uint32_t rdttype:4;
 			uint32_t sqos:3;
 			uint32_t srbp:1;
@@ -182,7 +195,7 @@ struct qbman_fd {
 			uint32_t saddr_lo;
 
 			uint32_t saddr_hi:17;
-			uint32_t rsv1:15;
+			uint32_t rsv1_att:15;
 
 			uint32_t len;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5780e49297..5d4749eae3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -22,7 +22,7 @@ uint32_t dpaa2_coherent_alloc_cache;
 static inline int
 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
 		     uint32_t len, struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_rbp *rbp, int ser)
+		     struct dpaa2_qdma_rbp *rbp, int ser)
 {
 	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
 	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
@@ -93,7 +93,7 @@ qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
 static void
 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 			uint64_t fle_iova,
-			struct rte_dpaa2_qdma_rbp *rbp,
+			struct dpaa2_qdma_rbp *rbp,
 			uint64_t src, uint64_t dest,
 			size_t len, uint32_t flags, uint32_t fmt)
 {
@@ -114,7 +114,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* source */
 		sdd->read_cmd.portid = rbp->sportid;
 		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->svfid;
 
 		if (rbp->srbp) {
@@ -127,7 +126,6 @@ dpaa2_qdma_populate_fle(struct qbman_fle *fle,
 		/* destination */
 		sdd->write_cmd.portid = rbp->dportid;
 		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfa = rbp->vfa;
 		sdd->rbpcmd_simple.vfid = rbp->dvfid;
 
 		if (rbp->drbp) {
@@ -178,7 +176,7 @@ dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
 		     struct rte_dpaa2_qdma_job **job,
 		     uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	size_t iova;
 	int ret = 0, loop;
@@ -276,7 +274,7 @@ dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
 				  struct rte_dpaa2_qdma_job **job,
 				  uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	void *elem;
@@ -322,7 +320,7 @@ dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
 			   struct rte_dpaa2_qdma_job **job,
 			   uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	uint16_t i;
 	int ret;
@@ -375,7 +373,7 @@ dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
 			struct rte_dpaa2_qdma_job **job,
 			uint16_t nb_jobs)
 {
-	struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
+	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
 	struct rte_dpaa2_qdma_job **ppjob;
 	void *elem;
 	struct qbman_fle *fle;
@@ -1223,17 +1221,38 @@ rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
 	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
 }
 
-/* Enable RBP */
-void
-rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-				struct rte_dpaa2_qdma_rbp *rbp_config)
+static int
+dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
+	const struct rte_dma_vchan_conf *conf)
 {
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->dst_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.dportid = conf->dst_port.pcie.coreid;
+		vq->rbp.dpfid = conf->dst_port.pcie.pfid;
+		if (conf->dst_port.pcie.vfen) {
+			vq->rbp.dvfa = 1;
+			vq->rbp.dvfid = conf->dst_port.pcie.vfid;
+		}
+		vq->rbp.drbp = 1;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		conf->direction == RTE_DMA_DIR_DEV_TO_DEV) {
+		if (conf->src_port.port_type != RTE_DMA_PORT_PCIE)
+			return -EINVAL;
+		vq->rbp.enable = 1;
+		vq->rbp.sportid = conf->src_port.pcie.coreid;
+		vq->rbp.spfid = conf->src_port.pcie.pfid;
+		if (conf->src_port.pcie.vfen) {
+			vq->rbp.svfa = 1;
+			vq->rbp.dvfid = conf->src_port.pcie.vfid;
+		}
+		vq->rbp.srbp = 1;
+	}
 
-	memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
-			sizeof(struct rte_dpaa2_qdma_rbp));
+	return 0;
 }
 
 static int
@@ -1247,12 +1266,16 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	char ring_name[32];
 	char pool_name[64];
 	int fd_long_format = 1;
-	int sg_enable = 0;
+	int sg_enable = 0, ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
+	ret = dpaa2_qdma_vchan_rbp_set(&qdma_dev->vqs[vchan], conf);
+	if (ret)
+		return ret;
+
 	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
 		sg_enable = 1;
 
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 5941b5a5d3..811906fcbc 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -166,6 +166,42 @@ struct qdma_sg_entry {
 	};
 } __rte_packed;
 
+struct dpaa2_qdma_rbp {
+	uint32_t use_ultrashort:1;
+	uint32_t enable:1;
+	/**
+	 * dportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t dportid:4;
+	uint32_t dpfid:2;
+	uint32_t dvfid:6;
+	uint32_t dvfa:1;
+	/*using route by port for destination */
+	uint32_t drbp:1;
+	/**
+	 * sportid:
+	 * 0000 PCI-Express 1
+	 * 0001 PCI-Express 2
+	 * 0010 PCI-Express 3
+	 * 0011 PCI-Express 4
+	 * 0100 PCI-Express 5
+	 * 0101 PCI-Express 6
+	 */
+	uint32_t sportid:4;
+	uint32_t spfid:2;
+	uint32_t svfid:6;
+	uint32_t svfa:1;
+	/* using route by port for source */
+	uint32_t srbp:1;
+	uint32_t rsv:2;
+};
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
@@ -216,7 +252,7 @@ struct qdma_virt_queue {
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
 	/** Route by port */
-	struct rte_dpaa2_qdma_rbp rbp;
+	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
 	uint8_t in_use;
 	/** States if this vq has exclusively associated hw queue */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index 5a8da46d12..b0bf9d8bcc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -13,42 +13,6 @@
 /** States if the destination addresses is physical. */
 #define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
 
-struct rte_dpaa2_qdma_rbp {
-	uint32_t use_ultrashort:1;
-	uint32_t enable:1;
-	/**
-	 * dportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t dportid:4;
-	uint32_t dpfid:2;
-	uint32_t dvfid:6;
-	/*using route by port for destination */
-	uint32_t drbp:1;
-	/**
-	 * sportid:
-	 * 0000 PCI-Express 1
-	 * 0001 PCI-Express 2
-	 * 0010 PCI-Express 3
-	 * 0011 PCI-Express 4
-	 * 0100 PCI-Express 5
-	 * 0101 PCI-Express 6
-	 */
-	uint32_t sportid:4;
-	uint32_t spfid:2;
-	uint32_t svfid:6;
-	/* using route by port for source */
-	uint32_t srbp:1;
-	/* Virtual Function Active */
-	uint32_t vfa:1;
-	uint32_t rsv:3;
-};
-
 /** Determines a QDMA job */
 struct rte_dpaa2_qdma_job {
 	/** Source Address from where DMA is (to be) performed */
@@ -67,6 +31,7 @@ struct rte_dpaa2_qdma_job {
 	 */
 	uint16_t status;
 	uint16_t vq_id;
+	uint64_t cnxt;
 	/**
 	 * FLE pool element maintained by user, in case no qDMA response.
 	 * Note: the address must be allocated from DPDK memory pool.
@@ -104,24 +69,6 @@ void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable Route-by-port on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param rbp_config
- *   Configuration for route-by-port
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_rbp *rbp_config);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
index fc16517f7a..43e8b2d5c5 100644
--- a/drivers/dma/dpaa2/version.map
+++ b/drivers/dma/dpaa2/version.map
@@ -10,5 +10,4 @@ EXPERIMENTAL {
 	rte_dpaa2_qdma_copy_multi;
 	rte_dpaa2_qdma_vchan_fd_us_enable;
 	rte_dpaa2_qdma_vchan_internal_sg_enable;
-	rte_dpaa2_qdma_vchan_rbp_enable;
 };
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 02/15] dma/dpaa2: refactor driver code
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-15  7:13                   ` [v7 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15 22:11                     ` Stephen Hemminger
  2024-10-15  7:13                   ` [v7 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
                                     ` (12 subsequent siblings)
  14 siblings, 1 reply; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Anatoly Burakov; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Refactor the driver code with changes in:
- multiple HW queues
- DMA single copy and SG copy
- silent mode

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |    8 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 2200 ++++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  148 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  130 +-
 drivers/dma/dpaa2/version.map          |   13 -
 5 files changed, 1150 insertions(+), 1349 deletions(-)
 delete mode 100644 drivers/dma/dpaa2/version.map

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index d2c26231e2..eeeb5d52a8 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -73,3 +73,11 @@ Platform Requirement
 
 DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
 ``Supported DPAA2 SoCs``.
+
+Device Arguments
+----------------
+* Use dev arg option ``fle_pre_populate=1`` to pre-populate all
+  DMA descriptors with pre-initialized values.
+  usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
+* Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
+  usage example: ``fslmc:dpdmai.1,desc_debug=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5d4749eae3..e364cc8f3d 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #include <rte_eal.h>
@@ -14,220 +14,370 @@
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
-#define DPAA2_QDMA_PREFETCH "prefetch"
+#define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
+#define DPAA2_QDMA_DESC_DEBUG "desc_debug"
 
-uint32_t dpaa2_coherent_no_alloc_cache;
-uint32_t dpaa2_coherent_alloc_cache;
+static uint32_t dpaa2_coherent_no_alloc_cache;
+static uint32_t dpaa2_coherent_alloc_cache;
 
-static inline int
-qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd,
-		     struct dpaa2_qdma_rbp *rbp, int ser)
+static struct fsl_mc_io s_proc_mc_reg;
+
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
 {
-	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+	if (strcmp(value, "1"))
+		return -1;
 
-	fd->simple_pci.len_sl = len;
+	return 0;
+}
 
-	fd->simple_pci.bmt = 1;
-	fd->simple_pci.fmt = 3;
-	fd->simple_pci.sl = 1;
-	fd->simple_pci.ser = ser;
+static int
+dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
 
-	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
-	fd->simple_pci.srbp = rbp->srbp;
-	if (rbp->srbp)
-		fd->simple_pci.rdttype = 0;
-	else
-		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+	if (!devargs)
+		return 0;
 
-	/*dest is pcie memory */
-	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
-	fd->simple_pci.drbp = rbp->drbp;
-	if (rbp->drbp)
-		fd->simple_pci.wrttype = 0;
-	else
-		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
 
-	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
 
-	return 0;
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
 }
 
 static inline int
-qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
-		     uint32_t len, struct qbman_fd *fd, int ser)
+qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
+	const uint16_t *elem, uint16_t nb,
+	uint16_t *free_space)
 {
-	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
-	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
-
-	fd->simple_ddr.len = len;
-
-	fd->simple_ddr.bmt = 1;
-	fd->simple_ddr.fmt = 3;
-	fd->simple_ddr.sl = 1;
-	fd->simple_ddr.ser = ser;
-	/**
-	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
-	 * Coherent copy of cacheable memory,
-	* lookup in downstream cache, no allocate
-	 * on miss
-	 */
-	fd->simple_ddr.rns = 0;
-	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
-	/**
-	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
-	 * Coherent write of cacheable memory,
-	 * lookup in downstream cache, no allocate on miss
-	 */
-	fd->simple_ddr.wns = 0;
-	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+	uint16_t i;
 
-	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
-	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+	if (unlikely(nb > ring->free_space))
+		return 0;
 
-	return 0;
+	for (i = 0; i < nb; i++) {
+		ring->cntx_idx_ring[ring->tail] = elem[i];
+		ring->tail = (ring->tail + 1) &
+			(DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space -= nb;
+	ring->nb_in_ring += nb;
+
+	if (free_space)
+		*free_space = ring->free_space;
+
+	return nb;
 }
 
-static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			uint64_t fle_iova,
-			struct dpaa2_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags, uint32_t fmt)
+static inline int
+qdma_cntx_idx_ring_dq(struct qdma_cntx_idx_ring *ring,
+	uint16_t *elem, uint16_t max)
 {
-	struct qdma_sdd *sdd;
-	uint64_t sdd_iova;
+	int ret = ring->nb_in_ring > max ? max : ring->nb_in_ring;
+
+	if (!ret)
+		return 0;
+
+	if ((ring->start + ret) < DPAA2_QDMA_MAX_DESC) {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			ret * sizeof(uint16_t));
+		ring->start += ret;
+	} else {
+		rte_memcpy(elem,
+			&ring->cntx_idx_ring[ring->start],
+			(DPAA2_QDMA_MAX_DESC - ring->start) *
+			sizeof(uint16_t));
+		rte_memcpy(&elem[DPAA2_QDMA_MAX_DESC - ring->start],
+			&ring->cntx_idx_ring[0],
+			(ret - DPAA2_QDMA_MAX_DESC + ring->start) *
+			sizeof(uint16_t));
+		ring->start = (ring->start + ret) & (DPAA2_QDMA_MAX_DESC - 1);
+	}
+	ring->free_space += ret;
+	ring->nb_in_ring -= ret;
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_multi_eq(struct qdma_virt_queue *qdma_vq)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
+	uint16_t txq_id = dpdmai_dev->tx_queue[qdma_vq->vq_id].fqid;
+	struct qbman_eq_desc eqdesc;
+	struct qbman_swp *swp;
+	uint32_t num_to_send = 0;
+	uint16_t num_tx = 0;
+	uint32_t enqueue_loop, loop;
+	int ret;
+	struct qbman_fd *fd = qdma_vq->fd;
+	uint16_t nb_fds = qdma_vq->fd_idx, idx, dst_idx;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_QDMA_ERR("Failed to allocate IO portal, tid: %d",
+				rte_gettid());
+			return -EIO;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Prepare enqueue descriptor */
+	qbman_eq_desc_clear(&eqdesc);
+	qbman_eq_desc_set_fq(&eqdesc, txq_id);
+	qbman_eq_desc_set_no_orp(&eqdesc, 0);
+	qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+	while (nb_fds > 0) {
+		num_to_send = (nb_fds > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_fds;
 
-	sdd = (struct qdma_sdd *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
-			QDMA_FLE_SDD_OFFSET);
-	sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
+		/* Enqueue the packet to the QBMAN */
+		enqueue_loop = 0;
+		loop = num_to_send;
+
+		while (enqueue_loop < loop) {
+			ret = qbman_swp_enqueue_multiple(swp,
+				&eqdesc,
+				&fd[num_tx + enqueue_loop],
+				NULL,
+				loop - enqueue_loop);
+			if (likely(ret >= 0))
+				enqueue_loop += ret;
+		}
+		num_tx += num_to_send;
+		nb_fds -= loop;
+	}
+
+	qdma_vq->num_enqueues += num_tx;
+	if (unlikely(num_tx != qdma_vq->fd_idx)) {
+		dst_idx = 0;
+		for (idx = num_tx; idx < qdma_vq->fd_idx; idx++) {
+			rte_memcpy(&qdma_vq->fd[dst_idx],
+				&qdma_vq->fd[idx],
+				sizeof(struct qbman_fd));
+			dst_idx++;
+		}
+	}
+	qdma_vq->fd_idx -= num_tx;
+
+	return num_tx;
+}
+
+static void
+fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
+	struct dpaa2_qdma_rbp *rbp, uint64_t src, uint64_t dest,
+	uint32_t fmt)
+{
+	struct qbman_fle *fle = fle_sdd->fle;
+	struct qdma_sdd *sdd = fle_sdd->sdd;
+	uint64_t sdd_iova = DPAA2_VADDR_TO_IOVA(sdd);
 
 	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, sdd_iova);
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd)));
 
 	/* source and destination descriptor */
 	if (rbp && rbp->enable) {
 		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
 		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
 		}
-		sdd++;
 		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
 		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
 		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
 		}
-
 	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
 	}
-	fle++;
 	/* source frame list to source buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
 #endif
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
-	fle++;
 	/* destination frame list to destination buffer */
-	if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		DPAA2_SET_FLE_BMT(fle);
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
 #endif
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	fle->word4.fmt = fmt;
-	DPAA2_SET_FLE_LEN(fle, len);
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
 }
 
-static inline int
-dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
-		     struct qbman_fd *fd,
-		     struct rte_dpaa2_qdma_job **job,
-		     uint16_t nb_jobs)
+static void
+sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 {
+	uint16_t i;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+		/* source SG */
+		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+		/* destination SG */
+		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
+		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
+#else
+		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
+#endif
+	}
+}
+
+static void
+fle_sdd_sg_pre_populate(struct qdma_cntx_sg *sg_cntx,
+	struct qdma_virt_queue *qdma_vq)
+{
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	size_t iova;
-	int ret = 0, loop;
-	int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
-				0 : 1;
-
-	for (loop = 0; loop < nb_jobs; loop++) {
-		if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
-			iova = (size_t)job[loop]->dest;
-		else
-			iova = (size_t)job[loop]->src;
-
-		/* Set the metadata */
-		job[loop]->vq_id = qdma_vq->vq_id;
-		ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-		*ppjob = job[loop];
-
-		if ((rbp->drbp == 1) || (rbp->srbp == 1))
-			ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], rbp, ser);
-		else
-			ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
-					(phys_addr_t)job[loop]->dest,
-					job[loop]->len, &fd[loop], ser);
+
+	memset(sg_cntx, 0, sizeof(struct qdma_cntx_sg));
+
+	src_sge_iova = DPAA2_VADDR_TO_IOVA(src_sge);
+	dst_sge_iova = DPAA2_VADDR_TO_IOVA(dst_sge);
+
+	sg_entry_pre_populate(sg_cntx);
+	fle_sdd_pre_populate(&sg_cntx->fle_sdd,
+		rbp, src_sge_iova, dst_sge_iova,
+		QBMAN_FLE_WORD4_FMT_SGE);
+}
+
+static inline uint32_t
+sg_entry_post_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
+{
+	uint16_t i;
+	uint32_t total_len = 0;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
+
+	for (i = 0; i < (nb_sge - 1); i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
+
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
+		total_len += dst[i].length;
+
+		src_sge->ctrl.f = 0;
+		dst_sge->ctrl.f = 0;
+		src_sge++;
+		dst_sge++;
 	}
 
-	return ret;
+	if (unlikely(src[i].length != dst[i].length))
+		return -ENOTSUP;
+
+	src_sge->addr_lo = (uint32_t)src[i].addr;
+	src_sge->addr_hi = (src[i].addr >> 32);
+	src_sge->data_len.data_len_sl0 = src[i].length;
+
+	dst_sge->addr_lo = (uint32_t)dst[i].addr;
+	dst_sge->addr_hi = (dst[i].addr >> 32);
+	dst_sge->data_len.data_len_sl0 = dst[i].length;
+
+	total_len += dst[i].length;
+	sg_cntx->job_nb = nb_sge;
+
+	src_sge->ctrl.f = QDMA_SG_F;
+	dst_sge->ctrl.f = QDMA_SG_F;
+
+	return total_len;
+}
+
+static inline void
+sg_fle_post_populate(struct qbman_fle fle[],
+	size_t len)
+{
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
-static uint32_t
-qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
-		       struct qdma_sg_entry *src_sge,
-		       struct qdma_sg_entry *dst_sge,
-		       uint16_t nb_jobs)
+static inline uint32_t
+sg_entry_populate(const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst, struct qdma_cntx_sg *sg_cntx,
+	uint16_t nb_sge)
 {
 	uint16_t i;
 	uint32_t total_len = 0;
-	uint64_t iova;
+	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
+	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < nb_jobs; i++) {
-		/* source SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
-			src_sge->addr_lo = (uint32_t)jobs[i]->src;
-			src_sge->addr_hi = (jobs[i]->src >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
-			src_sge->addr_lo = (uint32_t)iova;
-			src_sge->addr_hi = iova >> 32;
-		}
-		src_sge->data_len.data_len_sl0 = jobs[i]->len;
+	for (i = 0; i < nb_sge; i++) {
+		if (unlikely(src[i].length != dst[i].length))
+			return -ENOTSUP;
+
+		src_sge->addr_lo = (uint32_t)src[i].addr;
+		src_sge->addr_hi = (src[i].addr >> 32);
+		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -235,16 +385,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		/* destination SG */
-		if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
-			dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
-			dst_sge->addr_hi = (jobs[i]->dest >> 32);
-		} else {
-			iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
-			dst_sge->addr_lo = (uint32_t)iova;
-			dst_sge->addr_hi = iova >> 32;
-		}
-		dst_sge->data_len.data_len_sl0 = jobs[i]->len;
+		dst_sge->addr_lo = (uint32_t)dst[i].addr;
+		dst_sge->addr_hi = (dst[i].addr >> 32);
+		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
@@ -252,9 +395,9 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 #else
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
 #endif
-		total_len += jobs[i]->len;
+		total_len += src[i].length;
 
-		if (i == (nb_jobs - 1)) {
+		if (i == (nb_sge - 1)) {
 			src_sge->ctrl.f = QDMA_SG_F;
 			dst_sge->ctrl.f = QDMA_SG_F;
 		} else {
@@ -265,325 +408,452 @@ qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
 		dst_sge++;
 	}
 
+	sg_cntx->job_nb = nb_sge;
+
 	return total_len;
 }
 
-static inline int
-dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
-				  struct qbman_fd *fd,
-				  struct rte_dpaa2_qdma_job **job,
-				  uint16_t nb_jobs)
+static inline void
+fle_populate(struct qbman_fle fle[],
+	struct qdma_sdd sdd[], uint64_t sdd_iova,
+	struct dpaa2_qdma_rbp *rbp,
+	uint64_t src_iova, uint64_t dst_iova, size_t len,
+	uint32_t fmt)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
-	void *elem;
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SDD_FLE], sdd_iova);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SDD_FLE],
+		(DPAA2_QDMA_MAX_SDD * (sizeof(struct qdma_sdd))));
 
-	for (i = 0; i < nb_jobs; i++) {
-		elem = job[i]->usr_elem;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem);
-#endif
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.portid =
+			rbp->sportid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.pfid =
+			rbp->spfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfid =
+			rbp->svfid;
+		sdd[DPAA2_QDMA_SRC_SDD].rbpcmd_simple.vfa =
+			rbp->svfa;
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+		if (rbp->srbp) {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rbp =
+				rbp->srbp;
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+				dpaa2_coherent_no_alloc_cache;
+		}
+		/* destination */
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.portid =
+			rbp->dportid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.pfid =
+			rbp->dpfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfid =
+			rbp->dvfid;
+		sdd[DPAA2_QDMA_DST_SDD].rbpcmd_simple.vfa =
+			rbp->dvfa;
 
-		job[i]->vq_id = qdma_vq->vq_id;
+		if (rbp->drbp) {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.rbp =
+				rbp->drbp;
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				DPAA2_RBP_MEM_RW;
+		} else {
+			sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+				dpaa2_coherent_alloc_cache;
+		}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	} else {
+		sdd[DPAA2_QDMA_SRC_SDD].read_cmd.rdtype =
+			dpaa2_coherent_no_alloc_cache;
+		sdd[DPAA2_QDMA_DST_SDD].write_cmd.wrttype =
+			dpaa2_coherent_alloc_cache;
+	}
+	/* source frame list to source buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+#endif
+	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
+	/* destination frame list to destination buffer */
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+#endif
+	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-				DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(&fle[DPAA2_QDMA_DST_FLE]);
+}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-			job[i]->src, job[i]->dest, job[i]->len,
-			job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
-	}
+static inline void
+fle_post_populate(struct qbman_fle fle[],
+	uint64_t src, uint64_t dest, size_t len)
+{
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
-	return 0;
+	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
+	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 }
 
 static inline int
-dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
-			   struct qbman_fd *fd,
-			   struct rte_dpaa2_qdma_job **job,
-			   uint16_t nb_jobs)
+dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	uint16_t i;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	uint16_t expected = qdma_vq->fd_idx;
 	int ret;
-	void *elem[DPAA2_QDMA_MAX_DESC];
-	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova;
 
-	ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
+	ret = dpaa2_qdma_multi_eq(qdma_vq);
+	if (likely(ret == expected))
+		return 0;
 
-	for (i = 0; i < nb_jobs; i++) {
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		elem_iova = rte_mempool_virt2iova(elem[i]);
-#else
-		elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
-#endif
+	return -EBUSY;
+}
 
-		ppjob = (struct rte_dpaa2_qdma_job **)
-			((uintptr_t)(uint64_t)elem[i] +
-			 QDMA_FLE_SINGLE_JOB_OFFSET);
-		*ppjob = job[i];
+static inline void
+dpaa2_qdma_fle_dump(const struct qbman_fle *fle)
+{
+	DPAA2_QDMA_INFO("addr:0x%08x-0x%08x, len:%d, frc:0x%08x, bpid:%d",
+		fle->addr_hi, fle->addr_lo, fle->length, fle->frc,
+		fle->word4.bpid);
+	DPAA2_QDMA_INFO("ivp:%d, bmt:%d, off:%d, fmt:%d, sl:%d, f:%d",
+		fle->word4.ivp, fle->word4.bmt, fle->word4.offset,
+		fle->word4.fmt, fle->word4.sl, fle->word4.f);
+}
 
-		job[i]->vq_id = qdma_vq->vq_id;
+static inline void
+dpaa2_qdma_sdd_dump(const struct qdma_sdd *sdd)
+{
+	DPAA2_QDMA_INFO("stride:%d, rbpcmd:0x%08x, cmd:0x%08x",
+		sdd->stride, sdd->rbpcmd, sdd->cmd);
+}
+
+static inline void
+dpaa2_qdma_sge_dump(const struct qdma_sg_entry *sge)
+{
+	DPAA2_QDMA_INFO("addr 0x%08x-0x%08x, len:0x%08x, ctl:0x%08x",
+		sge->addr_hi, sge->addr_lo, sge->data_len.data_len_sl0,
+		sge->ctrl_fields);
+}
 
-		fle = (struct qbman_fle *)
-			((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
-		fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+static void
+dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
+{
+	int i;
+	const struct qdma_cntx_fle_sdd *fle_sdd;
+	const struct qdma_sdd *sdd;
+	const struct qdma_cntx_sg *cntx_sg = NULL;
+	const struct qdma_cntx_long *cntx_long = NULL;
 
-		DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
-		DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
-		DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
+	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
+	sdd = fle_sdd->sdd;
 
-		memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
+	for (i = 0; i < DPAA2_QDMA_MAX_FLE; i++) {
+		DPAA2_QDMA_INFO("fle[%d] info:", i);
+		dpaa2_qdma_fle_dump(&fle[i]);
+	}
 
-		dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-				job[i]->src, job[i]->dest, job[i]->len,
-				job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
+	if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
+		fle[DPAA2_QDMA_DST_FLE].word4.fmt) {
+		DPAA2_QDMA_ERR("fle[%d].fmt(%d) != fle[%d].fmt(%d)",
+			DPAA2_QDMA_SRC_FLE,
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt,
+			DPAA2_QDMA_DST_FLE,
+			fle[DPAA2_QDMA_DST_FLE].word4.fmt);
+
+		return;
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SGE) {
+		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
+			fle_sdd);
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+		QBMAN_FLE_WORD4_FMT_SBF) {
+		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
+			fle_sdd);
+	} else {
+		DPAA2_QDMA_ERR("Unsupported fle format:%d",
+			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
+		return;
 	}
 
-	return 0;
+	for (i = 0; i < DPAA2_QDMA_MAX_SDD; i++) {
+		DPAA2_QDMA_INFO("sdd[%d] info:", i);
+		dpaa2_qdma_sdd_dump(&sdd[i]);
+	}
+
+	if (cntx_long) {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
+			cntx_long->cntx_idx);
+	}
+
+	if (cntx_sg) {
+		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
+			cntx_sg->job_nb);
+		if (!cntx_sg->job_nb ||
+			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			DPAA2_QDMA_ERR("Invalid SG job number:%d",
+				cntx_sg->job_nb);
+			return;
+		}
+		for (i = 0; i < cntx_sg->job_nb; i++) {
+			DPAA2_QDMA_INFO("sg[%d] src info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_src_entry[i]);
+			DPAA2_QDMA_INFO("sg[%d] dst info:", i);
+			dpaa2_qdma_sge_dump(&cntx_sg->sg_dst_entry[i]);
+			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
+				cntx_sg->cntx_idx[i]);
+		}
+	}
 }
 
-static inline int
-dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
-			struct qbman_fd *fd,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
+static int
+dpaa2_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
 {
-	struct dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
-	struct rte_dpaa2_qdma_job **ppjob;
-	void *elem;
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected, i;
+	uint32_t len;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_sg *cntx_sg = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+	rte_iova_t src_sge_iova, dst_sge_iova;
 	struct qbman_fle *fle;
-	uint64_t elem_iova, fle_iova, src, dst;
-	int ret = 0, i;
-	struct qdma_sg_entry *src_sge, *dst_sge;
-	uint32_t len, fmt, flags;
-
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
-		elem = job[0]->usr_elem;
+	struct qdma_sdd *sdd;
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA2_QDMA_ERR("SG entry src num(%d) != dst num(%d)",
+			nb_src, nb_dst);
+		return -ENOTSUP;
+	}
+
+	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
+			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+		return -EINVAL;
+	}
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_dev->is_silent) {
+		cntx_sg = qdma_vq->cntx_sg[qdma_vq->silent_idx];
 	} else {
-		ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
-		if (ret) {
-			DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_sg);
+		if (ret)
 			return ret;
-		}
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		idx_addr = DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+		for (i = 0; i < nb_src; i++)
+			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	elem_iova = rte_mempool_virt2iova(elem);
+	cntx_iova = rte_mempool_virt2iova(cntx_sg);
 #else
-	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
 #endif
 
-	/* Set the metadata */
-	/* Save job context. */
-	*((uint16_t *)
-	((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
-	ppjob = (struct rte_dpaa2_qdma_job **)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
-	for (i = 0; i < nb_jobs; i++)
-		ppjob[i] = job[i];
-
-	ppjob[0]->vq_id = qdma_vq->vq_id;
-
-	fle = (struct qbman_fle *)
-		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
-	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
+	fle = cntx_sg->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_sg, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
 	DPAA2_SET_FD_ADDR(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
-		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_sg_pre_populate(cntx_sg, qdma_vq);
+			if (!qdma_dev->is_silent && cntx_sg && idx_addr) {
+				for (i = 0; i < nb_src; i++)
+					cntx_sg->cntx_idx[i] = idx_addr[i];
+			}
+		}
 
-	/* Populate FLE */
-	if (likely(nb_jobs > 1)) {
-		src_sge = (struct qdma_sg_entry *)
-			((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
-		dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
-		src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
-		dst = src +
-			DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
-		len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
-		fmt = QBMAN_FLE_WORD4_FMT_SGE;
-		flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
+		len = sg_entry_post_populate(src, dst,
+			cntx_sg, nb_src);
+		sg_fle_post_populate(fle, len);
 	} else {
-		src = job[0]->src;
-		dst = job[0]->dest;
-		len = job[0]->len;
-		fmt = QBMAN_FLE_WORD4_FMT_SBF;
-		flags = job[0]->flags;
+		sdd = cntx_sg->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		src_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_src_entry);
+		dst_sge_iova = cntx_iova +
+			offsetof(struct qdma_cntx_sg, sg_dst_entry);
+		len = sg_entry_populate(src, dst,
+			cntx_sg, nb_src);
+
+		fle_populate(fle, sdd, sdd_iova,
+			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
+			QBMAN_FLE_WORD4_FMT_SGE);
 	}
 
-	memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
-			DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
-
-	dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
-					src, dst, len, flags, fmt);
-
-	return 0;
-}
-
-static inline uint16_t
-dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
-		      const struct qbman_fd *fd,
-		      struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
-{
-	uint16_t vqid;
-	size_t iova;
-	struct rte_dpaa2_qdma_job **ppjob;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
-	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
-		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
-				| (uint64_t)fd->simple_pci.daddr_lo);
-	else
-		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
-				| (uint64_t)fd->simple_pci.saddr_lo);
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
-	*job = (struct rte_dpaa2_qdma_job *)*ppjob;
-	(*job)->status = (fd->simple_pci.acc_err << 8) |
-					(fd->simple_pci.error);
-	vqid = (*job)->vq_id;
-	*nb_jobs = 1;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return vqid;
+	return ret;
 }
 
-static inline uint16_t
-dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
-			     const struct qbman_fd *fd,
-			     struct rte_dpaa2_qdma_job **job,
-			     uint16_t *nb_jobs)
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_long *cntx_long = NULL;
+	rte_iova_t cntx_iova, fle_iova, sdd_iova;
 	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t status;
+	struct qdma_sdd *sdd;
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	memset(fd, 0, sizeof(struct qbman_fd));
 
-	*nb_jobs = 1;
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-			QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
+	if (qdma_dev->is_silent) {
+		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	} else {
+		ret = rte_mempool_get(qdma_vq->fle_pool,
+			(void **)&cntx_long);
+		if (ret)
+			return ret;
+		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
+	}
 
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	cntx_iova = rte_mempool_virt2iova(cntx_long);
+#else
+	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
+#endif
 
-	*job = *ppjob;
-	(*job)->status = status;
+	fle = cntx_long->fle_sdd.fle;
+	fle_iova = cntx_iova +
+		offsetof(struct qdma_cntx_long, fle_sdd) +
+		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+
+	if (qdma_vq->fle_pre_populate) {
+		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
+			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+				&qdma_vq->rbp,
+				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
+			if (!qdma_dev->is_silent && cntx_long) {
+				cntx_long->cntx_idx =
+					DPAA2_QDMA_IDX_FROM_FLAG(flags);
+			}
+		}
 
-	return (*job)->vq_id;
-}
+		fle_post_populate(fle, src, dst, length);
+	} else {
+		sdd = cntx_long->fle_sdd.sdd;
+		sdd_iova = cntx_iova +
+			offsetof(struct qdma_cntx_long, fle_sdd) +
+			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
+			src, dst, length,
+			QBMAN_FLE_WORD4_FMT_SBF);
+	}
 
-static inline uint16_t
-dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
-			 const struct qbman_fd *fd,
-			 struct rte_dpaa2_qdma_job **job,
-			 uint16_t *nb_jobs)
-{
-	struct qbman_fle *fle;
-	struct rte_dpaa2_qdma_job **ppjob = NULL;
-	uint16_t i, status;
+	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
+		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
 
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)
-			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	*nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
-	ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
-				QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
-	status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	for (i = 0; i < (*nb_jobs); i++) {
-		job[i] = ppjob[i];
-		job[i]->status = status;
-	}
+	qdma_vq->fd_idx++;
+	qdma_vq->silent_idx =
+		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
 
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_vq->fle_pool,
-			(void *)
-			((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected))
+			return 0;
+	} else {
+		return 0;
+	}
 
-	return job[0]->vq_id;
+	return ret;
 }
 
-/* Function to receive a QDMA job for a given device and queue*/
-static int
-dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
-				     uint16_t *vq_id,
-				     struct rte_dpaa2_qdma_job **job,
-				     uint16_t nb_jobs)
+static uint16_t
+dpaa2_qdma_dequeue(void *dev_private,
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *cntx_idx, bool *has_error)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	struct dpaa2_queue *rxq;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	struct qbman_pull_desc pulldesc;
 	struct qbman_swp *swp;
 	struct queue_storage_info_t *q_storage;
+	uint32_t fqid;
 	uint8_t status, pending;
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
 	int ret, pull_size;
+	struct qbman_fle *fle;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_long *cntx_long;
+	uint16_t free_space = 0, fle_elem_nb = 0;
 
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
+	if (unlikely(qdma_dev->is_silent))
+		return 0;
 
 	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
 		ret = dpaa2_affine_qbman_swp();
 		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
+			DPAA2_QDMA_ERR("Allocate portal err, tid(%d)",
 				rte_gettid());
+			if (has_error)
+				*has_error = true;
 			return 0;
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
+	pull_size = (nb_cpls > dpaa2_dqrr_size) ?
+		dpaa2_dqrr_size : nb_cpls;
+	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
+	fqid = rxq->fqid;
 	q_storage = rxq->q_storage;
 
 	if (unlikely(!q_storage->active_dqs)) {
@@ -592,21 +862,20 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      q_storage->last_num_pkts);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+			q_storage->last_num_pkts);
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
-				get_swp_active_dqs(
-				DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_DPIO->index)))
 				;
 			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued.QBMAN busy");
+				DPAA2_QDMA_DP_WARN("QBMAN busy");
 					/* Portal was busy, try again */
 				continue;
 			}
@@ -615,7 +884,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 		q_storage->active_dqs = dq_storage;
 		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
-				   dq_storage);
+			dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -629,7 +898,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
-	qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
+	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
 		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
@@ -662,563 +931,239 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-
-		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
-								&num_rx_ret);
-		if (vq_id)
-			vq_id[num_rx] = vqid;
-
-		dq_storage++;
-		num_rx += num_rx_ret;
-	} while (pending);
-
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-	}
-	/* issue a volatile dequeue command for next pull */
-	while (1) {
-		if (qbman_swp_pull(swp, &pulldesc)) {
-			DPAA2_QDMA_DP_WARN(
-				"VDQ command is not issued. QBMAN is busy (2)");
-			continue;
-		}
-		break;
-	}
-
-	q_storage->active_dqs = dq_storage1;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
-					uint16_t *vq_id,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
-	struct qbman_result *dq_storage;
-	struct qbman_pull_desc pulldesc;
-	struct qbman_swp *swp;
-	uint8_t status, pending;
-	uint8_t num_rx = 0;
-	const struct qbman_fd *fd;
-	uint16_t vqid, num_rx_ret;
-	uint16_t rx_fqid = rxq->fqid;
-	int ret, next_pull, num_pulled = 0;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-		nb_jobs = 1;
-	}
-
-	next_pull = nb_jobs;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
-		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	rxq = &(dpdmai_dev->rx_queue[0]);
-
-	do {
-		dq_storage = rxq->q_storage->dq_storage[0];
-		/* Prepare dequeue descriptor */
-		qbman_pull_desc_clear(&pulldesc);
-		qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
-		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-
-		if (next_pull > dpaa2_dqrr_size) {
-			qbman_pull_desc_set_numframes(&pulldesc,
-					dpaa2_dqrr_size);
-			next_pull -= dpaa2_dqrr_size;
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		fle = fle_sdd->fle;
+		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
+		fle_elem_nb++;
+		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+			QBMAN_FLE_WORD4_FMT_SGE) {
+			cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, &free_space);
 		} else {
-			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
-			next_pull = 0;
-		}
-
-		while (1) {
-			if (qbman_swp_pull(swp, &pulldesc)) {
-				DPAA2_QDMA_DP_WARN(
-					"VDQ command not issued. QBMAN busy");
-				/* Portal was busy, try again */
-				continue;
-			}
-			break;
-		}
-
-		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
-		/* Check if the previous issued command is completed. */
-		while (!qbman_check_command_complete(dq_storage))
-			;
-
-		num_pulled = 0;
-		pending = 1;
-
-		do {
-			/* Loop until dq_storage is updated
-			 * with new token by QBMAN
-			 */
-			while (!qbman_check_new_result(dq_storage))
-				;
-			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
-
-			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-				pending = 0;
-				/* Check for valid frame. */
-				status = qbman_result_DQ_flags(dq_storage);
-				if (unlikely((status &
-					QBMAN_DQ_STAT_VALIDFRAME) == 0))
-					continue;
-			}
-			fd = qbman_result_DQ_fd(dq_storage);
-
-			vqid = qdma_vq->get_job(qdma_vq, fd,
-						&job[num_rx], &num_rx_ret);
-			if (vq_id)
-				vq_id[num_rx] = vqid;
-
-			dq_storage++;
-			num_rx += num_rx_ret;
-			num_pulled++;
-
-		} while (pending);
-	/* Last VDQ provided all packets and more packets are requested */
-	} while (next_pull && num_pulled == dpaa2_dqrr_size);
-
-	return num_rx;
-}
-
-static int
-dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
-	uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
-	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
-	struct qbman_eq_desc eqdesc;
-	struct qbman_swp *swp;
-	uint32_t num_to_send = 0;
-	uint16_t num_tx = 0;
-	uint32_t enqueue_loop, loop;
-	int ret;
-
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
-		if (ret) {
-			DPAA2_QDMA_ERR(
-				"Failed to allocate IO portal, tid: %d",
-				rte_gettid());
-			return 0;
+			cntx_long = container_of(fle_sdd,
+				struct qdma_cntx_long, fle_sdd);
+			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&cntx_long->cntx_idx,
+				1, &free_space);
 		}
-	}
-	swp = DPAA2_PER_LCORE_PORTAL;
-
-	/* Prepare enqueue descriptor */
-	qbman_eq_desc_clear(&eqdesc);
-	qbman_eq_desc_set_fq(&eqdesc, txq_id);
-	qbman_eq_desc_set_no_orp(&eqdesc, 0);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		uint16_t fd_nb;
-		uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		uint16_t job_idx = 0;
-		uint16_t fd_sg_nb[8];
-		uint16_t nb_jobs_ret = 0;
-
-		if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
-		else
-			fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
-
-		memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
-
-		for (loop = 0; loop < fd_nb; loop++) {
-			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
-					      sg_entry_nb);
-			if (unlikely(ret < 0))
-				return 0;
-			fd_sg_nb[loop] = sg_entry_nb;
-			nb_jobs -= sg_entry_nb;
-			job_idx += sg_entry_nb;
-			sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
-						DPAA2_QDMA_MAX_SG_NB : nb_jobs;
-		}
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-
-		while (enqueue_loop < fd_nb) {
-			ret = qbman_swp_enqueue_multiple(swp,
-					&eqdesc, &fd[enqueue_loop],
-					NULL, fd_nb - enqueue_loop);
-			if (likely(ret >= 0)) {
-				for (loop = 0; loop < (uint32_t)ret; loop++)
-					nb_jobs_ret +=
-						fd_sg_nb[enqueue_loop + loop];
-				enqueue_loop += ret;
-			}
-		}
-
-		return nb_jobs_ret;
-	}
-
-	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
-
-	while (nb_jobs > 0) {
-		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
-			dpaa2_eqcr_size : nb_jobs;
-
-		ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
-						&job[num_tx], num_to_send);
-		if (unlikely(ret < 0))
-			break;
-
-		/* Enqueue the packet to the QBMAN */
-		enqueue_loop = 0;
-		loop = num_to_send;
-
-		while (enqueue_loop < loop) {
-			ret = qbman_swp_enqueue_multiple(swp,
-						&eqdesc,
-						&fd[num_tx + enqueue_loop],
-						NULL,
-						loop - enqueue_loop);
-			if (likely(ret >= 0))
-				enqueue_loop += ret;
-		}
-		num_tx += num_to_send;
-		nb_jobs -= loop;
-	}
-
-	qdma_vq->num_enqueues += num_tx;
-
-	return num_tx;
-}
-
-static inline int
-dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
-				qdma_vq->num_valid_jobs);
-
-	qdma_vq->num_valid_jobs = 0;
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
-		   rte_iova_t src, rte_iova_t dst,
-		   uint32_t length, uint64_t flags)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *job;
-	int idx, ret;
-
-	idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
-
-	ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return -ENOSPC;
-	}
-
-	job->src = src;
-	job->dest = dst;
-	job->len = length;
-	job->flags = flags;
-	job->status = 0;
-	job->vq_id = vchan;
-
-	qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
-	qdma_vq->num_valid_jobs++;
-
-	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
-		dpaa2_qdma_submit(dev_private, vchan);
-
-	return idx;
-}
-
-int
-rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-			  struct rte_dpaa2_qdma_job **jobs,
-			  uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-
-	return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
-			 struct qdma_virt_queue *qdma_vq,
-			 struct rte_dpaa2_qdma_job **jobs,
-			 uint16_t nb_jobs)
-{
-	struct qdma_virt_queue *temp_qdma_vq;
-	int ring_count;
-	int ret = 0, i;
-
-	if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
-		/** Make sure there are enough space to get jobs.*/
-		if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
-			return -EINVAL;
-	}
-
-	/* Only dequeue when there are pending jobs on VQ */
-	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
-		return 0;
-
-	if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
-		qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
-		nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
-				qdma_vq->num_dequeues), nb_jobs);
-
-	if (qdma_vq->exclusive_hw_queue) {
-		/* In case of exclusive queue directly fetch from HW queue */
-		ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
-		if (ret < 0) {
-			DPAA2_QDMA_ERR(
-				"Dequeue from DPDMAI device failed: %d", ret);
-			return ret;
-		}
-	} else {
-		uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
+		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+			pending = 0;
 
-		/* Get the QDMA completed jobs from the software ring.
-		 * In case they are not available on the ring poke the HW
-		 * to fetch completed jobs from corresponding HW queues
-		 */
-		ring_count = rte_ring_count(qdma_vq->status_ring);
-		if (ring_count < nb_jobs) {
-			ret = qdma_vq->dequeue_job(qdma_vq,
-					temp_vq_id, jobs, nb_jobs);
-			for (i = 0; i < ret; i++) {
-				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
-				rte_ring_enqueue(temp_qdma_vq->status_ring,
-					(void *)(jobs[i]));
-			}
-			ring_count = rte_ring_count(
-					qdma_vq->status_ring);
-		}
+		dq_storage++;
+	} while (pending);
 
-		if (ring_count) {
-			/* Dequeue job from the software ring
-			 * to provide to the user
-			 */
-			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
-						    (void **)jobs,
-						    ring_count, NULL);
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+		while (!qbman_check_command_complete(
+		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			;
+		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	}
+	/* issue a volatile dequeue command for next pull */
+	while (1) {
+		if (qbman_swp_pull(swp, &pulldesc)) {
+			DPAA2_QDMA_DP_WARN("QBMAN is busy (2)");
+			continue;
 		}
+		break;
 	}
 
-	qdma_vq->num_dequeues += ret;
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			  const uint16_t nb_cpls,
-			  uint16_t *last_idx,
-			  enum rte_dma_status_code *st)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret, i;
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
-
-	for (i = 0; i < ret; i++)
-		st[i] = jobs[i]->status;
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
-
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
-
-	return ret;
-}
-
-static uint16_t
-dpaa2_qdma_dequeue(void *dev_private,
-		   uint16_t vchan, const uint16_t nb_cpls,
-		   uint16_t *last_idx, bool *has_error)
-{
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
-	struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
-	int ret;
-
-	RTE_SET_USED(has_error);
-
-	ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
-				jobs, nb_cpls);
-
-	rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
+	q_storage->active_dqs = dq_storage1;
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	if (last_idx != NULL)
-		*last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
+	rte_mempool_put_bulk(qdma_vq->fle_pool,
+		qdma_vq->fle_elem, fle_elem_nb);
 
-	return ret;
-}
+	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
+		cntx_idx, nb_cpls);
 
-uint16_t
-rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-			       struct rte_dpaa2_qdma_job **jobs,
-			       uint16_t nb_cpls)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+	if (has_error)
+		*has_error = false;
 
-	return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
+	return num_rx;
 }
 
 static int
 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
-		    struct rte_dma_info *dev_info,
-		    uint32_t info_sz)
+	struct rte_dma_info *dev_info,
+	uint32_t info_sz __rte_unused)
 {
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 
 	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+		RTE_DMA_CAPA_MEM_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_DEV |
+		RTE_DMA_CAPA_DEV_TO_MEM |
+		RTE_DMA_CAPA_SILENT |
+		RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->dev_name = dev->device->name;
+	if (dpdmai_dev->qdma_dev)
+		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
 
 	return 0;
 }
 
 static int
 dpaa2_qdma_configure(struct rte_dma_dev *dev,
-		     const struct rte_dma_conf *dev_conf,
-		     uint32_t conf_sz)
+	const struct rte_dma_conf *dev_conf,
+	uint32_t conf_sz)
 {
-	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint16_t i;
+	struct dpdmai_rx_queue_cfg rx_queue_cfg;
+	struct dpdmai_rx_queue_attr rx_attr;
+	struct dpdmai_tx_queue_attr tx_attr;
+	struct dpaa2_queue *rxq;
+	int ret = 0;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
 	RTE_SET_USED(conf_sz);
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before config.");
-		return -1;
-	}
+	if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+		DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+			dev->data->dev_name, dev_conf->nb_vchans,
+			dpdmai_dev->num_queues);
 
-	/* Allocate Virtual Queues */
-	sprintf(name, "qdma_%d_vq", dev->data->dev_id);
-	qdma_dev->vqs = rte_malloc(name,
-			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
-			RTE_CACHE_LINE_SIZE);
-	if (!qdma_dev->vqs) {
-		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
-		return -ENOMEM;
+		return -ENOTSUP;
 	}
-	qdma_dev->num_vqs = dev_conf->nb_vchans;
-
-	return 0;
-}
-
-static int
-check_devargs_handler(__rte_unused const char *key,
-		      const char *value,
-		      __rte_unused void *opaque)
-{
-	if (strcmp(value, "1"))
-		return -1;
 
-	return 0;
-}
+	if (qdma_dev->vqs) {
+		DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+			dev->data->dev_name,
+			qdma_dev->num_vqs, dev_conf->nb_vchans);
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if ((qdma_dev->vqs[i].num_enqueues !=
+				qdma_dev->vqs[i].num_dequeues) &&
+				!qdma_dev->is_silent) {
+				DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+					i, qdma_dev->vqs[i].num_enqueues -
+					qdma_dev->vqs[i].num_dequeues);
+				return -EBUSY;
+			}
+		}
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+			rxq = &dpdmai_dev->rx_queue[i];
+			if (rxq->q_storage) {
+				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+					dev->data->dev_name, i);
+				dpaa2_free_dq_storage(rxq->q_storage);
+				rte_free(rxq->q_storage);
+				rxq->q_storage = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
+		qdma_dev->num_vqs = 0;
+	}
 
-static int
-dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
-{
-	struct rte_kvargs *kvlist;
+	/* Set up Rx Queues */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+		rxq = &dpdmai_dev->rx_queue[i];
+		ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+				CMD_PRI_LOW,
+				dpdmai_dev->token,
+				i, 0, &rx_queue_cfg);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+				dev->data->dev_name, i, ret);
+			return ret;
+		}
+	}
 
-	if (!devargs)
-		return 0;
+	/* Get Rx and Tx queues FQID's */
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &rx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
 
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (!kvlist)
-		return 0;
+		ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+				dpdmai_dev->token, i, 0, &tx_attr);
+		if (ret) {
+			DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+				dpdmai_dev->dpdmai_id, i, ret);
+			return ret;
+		}
+		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+	}
 
-	if (!rte_kvargs_count(kvlist, key)) {
-		rte_kvargs_free(kvlist);
-		return 0;
+	/* Allocate Virtual Queues */
+	qdma_dev->vqs = rte_zmalloc(NULL,
+		(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+		RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("%s: VQs(%d) alloc failed.",
+			dev->data->dev_name, dev_conf->nb_vchans);
+		return -ENOMEM;
 	}
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		qdma_dev->vqs[i].vq_id = i;
+		rxq = &dpdmai_dev->rx_queue[i];
+		/* Allocate DQ storage for the DPDMAI Rx queues */
+		rxq->q_storage = rte_zmalloc(NULL,
+			sizeof(struct queue_storage_info_t),
+			RTE_CACHE_LINE_SIZE);
+		if (!rxq->q_storage) {
+			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 
-	if (rte_kvargs_process(kvlist, key,
-			       check_devargs_handler, NULL) < 0) {
-		rte_kvargs_free(kvlist);
-		return 0;
+		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+		if (ret) {
+			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+				dev->data->dev_name, i);
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
 	}
-	rte_kvargs_free(kvlist);
 
-	return 1;
-}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+	qdma_dev->is_silent = dev_conf->enable_silent;
 
-/* Enable FD in Ultra Short format */
-void
-rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	return 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
-}
+alloc_failed:
+	for (i = 0; i < dev_conf->nb_vchans; i++) {
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
 
-/* Enable internal SG processing */
-void
-rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
-{
-	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
-	struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	rte_free(qdma_dev->vqs);
+	qdma_dev->vqs = NULL;
+	qdma_dev->num_vqs = 0;
 
-	qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
+	return ret;
 }
 
 static int
@@ -1257,16 +1202,14 @@ dpaa2_qdma_vchan_rbp_set(struct qdma_virt_queue *vq,
 
 static int
 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
-		       const struct rte_dma_vchan_conf *conf,
-		       uint32_t conf_sz)
+	const struct rte_dma_vchan_conf *conf,
+	uint32_t conf_sz)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	uint32_t pool_size;
-	char ring_name[32];
 	char pool_name[64];
-	int fd_long_format = 1;
-	int sg_enable = 0, ret;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1276,99 +1219,67 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	if (ret)
 		return ret;
 
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
-		sg_enable = 1;
-
-	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
-		fd_long_format = 0;
-
-	if (dev->data->dev_conf.enable_silent)
-		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
-
-	if (sg_enable) {
-		if (qdma_dev->num_vqs != 1) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports physical queue!");
-			return -ENODEV;
-		}
-		if (!fd_long_format) {
-			DPAA2_QDMA_ERR(
-				"qDMA SG format only supports long FD format!");
-			return -ENODEV;
-		}
-		pool_size = QDMA_FLE_SG_POOL_SIZE;
-	} else {
-		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_FLE_PRE_POPULATE))
+		qdma_dev->vqs[vchan].fle_pre_populate = 1;
+	else
+		qdma_dev->vqs[vchan].fle_pre_populate = 0;
 
-	if (qdma_dev->num_vqs == 1)
-		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
-	else {
-		/* Allocate a Ring for Virtual Queue in VQ mode */
-		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
-			 dev->data->dev_id, vchan);
-		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
-			conf->nb_desc, rte_socket_id(), 0);
-		if (!qdma_dev->vqs[vchan].status_ring) {
-			DPAA2_QDMA_ERR("Status ring creation failed for vq");
-			return rte_errno;
-		}
-	}
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_DESC_DEBUG))
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_DESC_DEBUG_FLAG;
+	else
+		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
+			    sizeof(struct qdma_cntx_long));
+
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+			DPAA2_QDMA_MAX_DESC * 2, pool_size,
+			512, 0, NULL, NULL, NULL, NULL,
+			SOCKET_ID_ANY, 0);
 	if (!qdma_dev->vqs[vchan].fle_pool) {
-		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
-		return -ENOMEM;
-	}
-
-	snprintf(pool_name, sizeof(pool_name),
-		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
-			conf->nb_desc, pool_size,
-			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
-			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
-	if (!qdma_dev->vqs[vchan].job_pool) {
-		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
 
-	if (fd_long_format) {
-		if (sg_enable) {
-			qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
-		} else {
-			if (dev->data->dev_conf.enable_silent)
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf_no_rsp;
-			else
-				qdma_dev->vqs[vchan].set_fd =
-					dpdmai_dev_set_multi_fd_lf;
-			qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
+	if (qdma_dev->is_silent) {
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_sg,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("sg cntx get from %s failed for silent mode",
+				       pool_name);
+			return ret;
+		}
+		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
+				(void **)qdma_dev->vqs[vchan].cntx_long,
+				DPAA2_QDMA_MAX_DESC);
+		if (ret) {
+			DPAA2_QDMA_ERR("long cntx get from %s failed for silent mode",
+				       pool_name);
+			return ret;
 		}
 	} else {
-		qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
-		qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
-	}
-
-	if (dpaa2_qdma_get_devargs(dev->device->devargs,
-			DPAA2_QDMA_PREFETCH)) {
-		/* If no prefetch is configured. */
-		qdma_dev->vqs[vchan].dequeue_job =
-				dpdmai_dev_dequeue_multijob_prefetch;
-		DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
-	} else {
-		qdma_dev->vqs[vchan].dequeue_job =
-			dpdmai_dev_dequeue_multijob_no_prefetch;
+		qdma_dev->vqs[vchan].ring_cntx_idx = rte_malloc(NULL,
+				sizeof(struct qdma_cntx_idx_ring),
+				RTE_CACHE_LINE_SIZE);
+		if (!qdma_dev->vqs[vchan].ring_cntx_idx) {
+			DPAA2_QDMA_ERR("DQ response ring alloc failed.");
+			return -ENOMEM;
+		}
+		qdma_dev->vqs[vchan].ring_cntx_idx->start = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->tail = 0;
+		qdma_dev->vqs[vchan].ring_cntx_idx->free_space =
+				QDMA_CNTX_IDX_RING_MAX_FREE;
+		qdma_dev->vqs[vchan].ring_cntx_idx->nb_in_ring = 0;
+		qdma_dev->vqs[vchan].fle_elem = rte_malloc(NULL,
+				sizeof(void *) * DPAA2_QDMA_MAX_DESC,
+				RTE_CACHE_LINE_SIZE);
 	}
 
 	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
 	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
-	qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
 
 	return 0;
 }
@@ -1377,11 +1288,17 @@ static int
 dpaa2_qdma_start(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 1;
+	/* Enable the device */
+	ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
@@ -1390,46 +1307,71 @@ static int
 dpaa2_qdma_stop(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	qdma_dev->state = 0;
+	/* Disable the device */
+	ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+		return ret;
+	}
 
 	return 0;
 }
 
 static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct dpaa2_queue *rxq;
 	int i;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* In case QDMA device is not in stopped state, return -EBUSY */
-	if (qdma_dev->state == 1) {
-		DPAA2_QDMA_ERR(
-			"Device is in running state. Stop before reset.");
-		return -EBUSY;
-	}
+	if (!qdma_dev)
+		return 0;
 
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
-		    qdma_dev->vqs[i].num_dequeues)) {
-			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+		if ((qdma_dev->vqs[i].num_enqueues !=
+		    qdma_dev->vqs[i].num_dequeues) &&
+		    !qdma_dev->is_silent) {
+		DPAA2_QDMA_ERR("VQ(%d) pending: eq(%"PRIu64") != dq(%"PRIu64")",
+				i, qdma_dev->vqs[i].num_enqueues,
+				qdma_dev->vqs[i].num_dequeues);
 			return -EBUSY;
 		}
 	}
 
-	/* Reset and free virtual queues */
+	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
-		rte_ring_free(qdma_dev->vqs[i].status_ring);
+		rxq = &dpdmai_dev->rx_queue[i];
+		if (rxq->q_storage) {
+			dpaa2_free_dq_storage(rxq->q_storage);
+			rte_free(rxq->q_storage);
+			rxq->q_storage = NULL;
+		}
+	}
+
+	if (qdma_dev->vqs) {
+		/* Free RXQ fle pool */
+		for (i = 0; i < qdma_dev->num_vqs; i++) {
+			if (qdma_dev->vqs[i].fle_pool) {
+				rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+				qdma_dev->vqs[i].fle_pool = NULL;
+			}
+			if (qdma_dev->vqs[i].ring_cntx_idx) {
+				rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+				qdma_dev->vqs[i].ring_cntx_idx = NULL;
+			}
+		}
+		rte_free(qdma_dev->vqs);
+		qdma_dev->vqs = NULL;
 	}
-	rte_free(qdma_dev->vqs);
-	qdma_dev->vqs = NULL;
 
 	/* Reset QDMA device structure */
 	qdma_dev->num_vqs = 0;
@@ -1438,18 +1380,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 }
 
 static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
-	DPAA2_QDMA_FUNC_TRACE();
-
-	dpaa2_qdma_reset(dev);
-
-	return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
 	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1504,123 +1436,44 @@ static int
 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
 	int ret;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			     dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("dmdmai disable failed");
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd process.",
+			dpdmai_dev->dpdmai_id);
+		return 0;
+	}
 
-	/* Set up the DQRR storage for Rx */
-	struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
+	/* Close the device at underlying layer*/
+	ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+			dpdmai_dev->dpdmai_id, ret);
 
-	if (rxq->q_storage) {
-		dpaa2_free_dq_storage(rxq->q_storage);
-		rte_free(rxq->q_storage);
+		return ret;
 	}
 
-	/* Close the device at underlying layer*/
-	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
-	if (ret)
-		DPAA2_QDMA_ERR("Failure closing dpdmai device");
+	if (qdma_dev) {
+		rte_free(qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
 {
 	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
-	struct dpdmai_rx_queue_cfg rx_queue_cfg;
 	struct dpdmai_attr attr;
-	struct dpdmai_rx_queue_attr rx_attr;
-	struct dpdmai_tx_queue_attr tx_attr;
-	struct dpaa2_queue *rxq;
-	int ret;
+	int ret, err;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
-	/* Open DPDMAI device */
-	dpdmai_dev->dpdmai_id = dpdmai_id;
-	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
-	dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
-					  RTE_CACHE_LINE_SIZE);
-	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
-		return ret;
-	}
-
-	/* Get DPDMAI attributes */
-	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				    dpdmai_dev->token, &attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->num_queues = attr.num_of_queues;
-
-	/* Set up Rx Queue */
-	memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
-	ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
-				  CMD_PRI_LOW,
-				  dpdmai_dev->token,
-				  0, 0, &rx_queue_cfg);
-	if (ret) {
-		DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-
-	/* Allocate DQ storage for the DPDMAI Rx queues */
-	rxq = &(dpdmai_dev->rx_queue[0]);
-	rxq->q_storage = rte_malloc("dq_storage",
-				    sizeof(struct queue_storage_info_t),
-				    RTE_CACHE_LINE_SIZE);
-	if (!rxq->q_storage) {
-		DPAA2_QDMA_ERR("q_storage allocation failed");
-		ret = -ENOMEM;
-		goto init_err;
-	}
-
-	memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-	ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-	if (ret) {
-		DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
-		goto init_err;
-	}
-
-	/* Get Rx and Tx queues FQID */
-	ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &rx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
-
-	ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-				  dpdmai_dev->token, 0, 0, &tx_attr);
-	if (ret) {
-		DPAA2_QDMA_ERR("Reading device failed with err: %d",
-			       ret);
-		goto init_err;
-	}
-	dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
-
-	/* Enable the device */
-	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
-			    dpdmai_dev->token);
-	if (ret) {
-		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
-		goto init_err;
-	}
-
 	if (!dpaa2_coherent_no_alloc_cache) {
 		if (dpaa2_svr_family == SVR_LX2160A) {
 			dpaa2_coherent_no_alloc_cache =
@@ -1635,24 +1488,76 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
 		}
 	}
 
-	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+	if (!s_proc_mc_reg.regs)
+		s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd process.",
+			dpdmai_id);
+		if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+			DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+				dpdmai_id, dpdmai_dev->dpdmai_id);
+			return -EINVAL;
+		}
+		if (!dpdmai_dev->qdma_dev) {
+			DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+				dpdmai_id);
+			return -ENOMEM;
+		}
+		if (dpdmai_dev->qdma_dev->num_vqs) {
+			DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+				dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+		}
+
+		return 0;
+	}
+
+	/* Open DPDMAI device */
+	dpdmai_dev->dpdmai_id = dpdmai_id;
+
+	if (dpdmai_dev->qdma_dev) {
+		rte_free(dpdmai_dev->qdma_dev);
+		dpdmai_dev->qdma_dev = NULL;
+	}
+	dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
+		sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
+	if (!dpdmai_dev->qdma_dev) {
+		DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+			dpdmai_id);
+		return -ENOMEM;
+	}
+	ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+	if (ret) {
+		DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		return ret;
+	}
 
-	/* Reset the QDMA device */
-	ret = dpaa2_qdma_reset(dev);
+	/* Get DPDMAI attributes */
+	ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token, &attr);
 	if (ret) {
-		DPAA2_QDMA_ERR("Resetting QDMA failed");
-		goto init_err;
+		DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
+			__func__, dpdmai_dev->dpdmai_id, ret);
+		err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+			dpdmai_dev->token);
+		if (err) {
+			DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+				dpdmai_dev->dpdmai_id, err);
+		}
+		return ret;
 	}
+	dpdmai_dev->num_queues = attr.num_of_queues;
+
+	DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
 
 	return 0;
-init_err:
-	dpaa2_dpdmai_dev_uninit(dev);
-	return ret;
 }
 
 static int
 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
-		 struct rte_dpaa2_device *dpaa2_dev)
+	struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_dma_dev *dmadev;
 	int ret;
@@ -1662,8 +1567,8 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	RTE_SET_USED(dpaa2_drv);
 
 	dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
-				      rte_socket_id(),
-				      sizeof(struct dpaa2_dpdmai_dev));
+		rte_socket_id(),
+		sizeof(struct dpaa2_dpdmai_dev));
 	if (!dmadev) {
 		DPAA2_QDMA_ERR("Unable to allocate dmadevice");
 		return -EINVAL;
@@ -1673,10 +1578,10 @@ dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	dmadev->dev_ops = &dpaa2_qdma_ops;
 	dmadev->device = &dpaa2_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
-	dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
+	dmadev->fp_obj->copy = dpaa2_qdma_copy;
+	dmadev->fp_obj->copy_sg = dpaa2_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa2_qdma_submit;
 	dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
-	dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
 	dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
@@ -1718,5 +1623,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
-	"no_prefetch=<int> ");
+	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
+	DPAA2_QDMA_DESC_DEBUG"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 811906fcbc..371393cb85 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,11 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
 #define _DPAA2_QDMA_H_
 
-#define DPAA2_QDMA_MAX_DESC		1024
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+#define DPAA2_QDMA_MAX_DESC		4096
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
@@ -13,48 +16,9 @@
 #define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
 #define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
 
-#define DPAA2_QDMA_MAX_FLE 3
-#define DPAA2_QDMA_MAX_SDD 2
-
-#define DPAA2_QDMA_MAX_SG_NB 64
-
-#define DPAA2_DPDMAI_MAX_QUEUES	1
-
-/** FLE single job pool size: job pointer(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor.
- */
-#define QDMA_FLE_SINGLE_POOL_SIZE (sizeof(uint64_t) + \
-			sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-			sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-/** FLE sg jobs pool size: job number(uint64_t) +
- * 3 Frame list + 2 source/destination descriptor  +
- * 64 (src + dst) sg entries + 64 jobs pointers.
- */
-#define QDMA_FLE_SG_POOL_SIZE (sizeof(uint64_t) + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD + \
-		sizeof(struct qdma_sg_entry) * (DPAA2_QDMA_MAX_SG_NB * 2) + \
-		sizeof(struct rte_qdma_job *) * DPAA2_QDMA_MAX_SG_NB)
-
-#define QDMA_FLE_JOB_NB_OFFSET 0
+#define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_FLE_SINGLE_JOB_OFFSET 0
-
-#define QDMA_FLE_FLE_OFFSET \
-		(QDMA_FLE_JOB_NB_OFFSET + sizeof(uint64_t))
-
-#define QDMA_FLE_SDD_OFFSET \
-		(QDMA_FLE_FLE_OFFSET + \
-		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE)
-
-#define QDMA_FLE_SG_ENTRY_OFFSET \
-		(QDMA_FLE_SDD_OFFSET + \
-		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
-
-#define QDMA_FLE_SG_JOBS_OFFSET \
-		(QDMA_FLE_SG_ENTRY_OFFSET + \
-		sizeof(struct qdma_sg_entry) * DPAA2_QDMA_MAX_SG_NB * 2)
+#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
 
 /** FLE pool cache size */
 #define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
@@ -202,12 +166,49 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum {
+	DPAA2_QDMA_SDD_FLE,
+	DPAA2_QDMA_SRC_FLE,
+	DPAA2_QDMA_DST_FLE,
+	DPAA2_QDMA_MAX_FLE
+};
+
+enum {
+	DPAA2_QDMA_SRC_SDD,
+	DPAA2_QDMA_DST_SDD,
+	DPAA2_QDMA_MAX_SDD
+};
+
+struct qdma_cntx_fle_sdd {
+	struct qbman_fle fle[DPAA2_QDMA_MAX_FLE];
+	struct qdma_sdd sdd[DPAA2_QDMA_MAX_SDD];
+} __rte_packed;
+
+struct qdma_cntx_sg {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	uint16_t job_nb;
+	uint16_t rsv[3];
+} __rte_packed;
+
+struct qdma_cntx_long {
+	struct qdma_cntx_fle_sdd fle_sdd;
+	uint16_t cntx_idx;
+	uint16_t rsv[3];
+} __rte_packed;
+
+#define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+
+#define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
 	/** Pointer to Next device instance */
 	TAILQ_ENTRY(dpaa2_qdma_device) next;
-	/** handle to DPDMAI object */
-	struct fsl_mc_io dpdmai;
 	/** HW ID for DPDMAI object */
 	uint32_t dpdmai_id;
 	/** Tocken of this device */
@@ -221,42 +222,30 @@ struct dpaa2_dpdmai_dev {
 	struct qdma_device *qdma_dev;
 };
 
-struct qdma_virt_queue;
-
-typedef uint16_t (qdma_get_job_t)(struct qdma_virt_queue *qdma_vq,
-					const struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t *nb_jobs);
-typedef int (qdma_set_fd_t)(struct qdma_virt_queue *qdma_vq,
-					struct qbman_fd *fd,
-					struct rte_dpaa2_qdma_job **job,
-					uint16_t nb_jobs);
-
-typedef int (qdma_dequeue_multijob_t)(
-				struct qdma_virt_queue *qdma_vq,
-				uint16_t *vq_id,
-				struct rte_dpaa2_qdma_job **job,
-				uint16_t nb_jobs);
+#define QDMA_CNTX_IDX_RING_EXTRA_SPACE 64
+#define QDMA_CNTX_IDX_RING_MAX_FREE \
+	(DPAA2_QDMA_MAX_DESC - QDMA_CNTX_IDX_RING_EXTRA_SPACE)
+struct qdma_cntx_idx_ring {
+	uint16_t cntx_idx_ring[DPAA2_QDMA_MAX_DESC];
+	uint16_t start;
+	uint16_t tail;
+	uint16_t free_space;
+	uint16_t nb_in_ring;
+};
 
-typedef int (qdma_enqueue_multijob_t)(
-			struct qdma_virt_queue *qdma_vq,
-			struct rte_dpaa2_qdma_job **job,
-			uint16_t nb_jobs);
+#define DPAA2_QDMA_DESC_DEBUG_FLAG (1 << 0)
 
 /** Represents a QDMA virtual queue */
 struct qdma_virt_queue {
-	/** Status ring of the virtual queue */
-	struct rte_ring *status_ring;
 	/** Associated hw queue */
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
 	/** States if this vq is in use or not */
-	uint8_t in_use;
-	/** States if this vq has exclusively associated hw queue */
-	uint8_t exclusive_hw_queue;
+	uint8_t fle_pre_populate;
 	/** Number of descriptor for the virtual DMA channel */
 	uint16_t nb_desc;
 	/* Total number of enqueues on this VQ */
@@ -266,18 +255,18 @@ struct qdma_virt_queue {
 
 	uint16_t vq_id;
 	uint32_t flags;
+	struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
+	uint16_t fd_idx;
+	struct qdma_cntx_idx_ring *ring_cntx_idx;
+
+	/**Used for silent enabled*/
+	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	uint16_t silent_idx;
 
-	struct rte_dpaa2_qdma_job *job_list[DPAA2_QDMA_MAX_DESC];
-	struct rte_mempool *job_pool;
 	int num_valid_jobs;
 
 	struct rte_dma_stats stats;
-
-	qdma_set_fd_t *set_fd;
-	qdma_get_job_t *get_job;
-
-	qdma_dequeue_multijob_t *dequeue_job;
-	qdma_enqueue_multijob_t *enqueue_job;
 };
 
 /** Represents a QDMA device. */
@@ -286,8 +275,7 @@ struct qdma_device {
 	struct qdma_virt_queue *vqs;
 	/** Total number of VQ's */
 	uint16_t num_vqs;
-	/** Device state - started or stopped */
-	uint8_t state;
+	uint8_t is_silent;
 };
 
 #endif /* _DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index b0bf9d8bcc..e49604c8fc 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2023 NXP
  */
 
 #ifndef _RTE_PMD_DPAA2_QDMA_H_
@@ -7,118 +7,30 @@
 
 #include <rte_compat.h>
 
-/** States if the source addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_SRC_PHY		(1ULL << 30)
+#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
+#define RTE_DPAA2_QDMA_LEN_MASK \
+	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/** States if the destination addresses is physical. */
-#define RTE_DPAA2_QDMA_JOB_DEST_PHY		(1ULL << 31)
+#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
+	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
 
-/** Determines a QDMA job */
-struct rte_dpaa2_qdma_job {
-	/** Source Address from where DMA is (to be) performed */
-	uint64_t src;
-	/** Destination Address where DMA is (to be) done */
-	uint64_t dest;
-	/** Length of the DMA operation in bytes. */
-	uint32_t len;
-	/** See RTE_QDMA_JOB_ flags */
-	uint32_t flags;
-	/**
-	 * Status of the transaction.
-	 * This is filled in the dequeue operation by the driver.
-	 * upper 8bits acc_err for route by port.
-	 * lower 8bits fd error
-	 */
-	uint16_t status;
-	uint16_t vq_id;
-	uint64_t cnxt;
-	/**
-	 * FLE pool element maintained by user, in case no qDMA response.
-	 * Note: the address must be allocated from DPDK memory pool.
-	 */
-	void *usr_elem;
-};
+#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
+	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable FD in Ultra Short format on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
+	((length) & RTE_DPAA2_QDMA_LEN_MASK)
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enable internal SG processing on a channel. This API should be
- * called before calling 'rte_dma_vchan_setup()' API.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- */
-__rte_experimental
-void rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan);
+#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
 
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Enqueue a copy operation onto the virtual DMA channel for silent mode,
- * when dequeue is not required.
- *
- * This queues up a copy operation to be performed by hardware, if the 'flags'
- * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
- * this operation, otherwise do not trigger doorbell.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs to be submitted to QDMA.
- * @param nb_cpls
- *   Number of DMA jobs.
- *
- * @return
- *   - >= 0..Number of enqueued job.
- *   - -ENOSPC: if no space left to enqueue.
- *   - other values < 0 on failure.
- */
-__rte_experimental
-int rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Return the number of operations that have been successfully completed.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param vchan
- *   The identifier of virtual DMA channel.
- * @param jobs
- *   Jobs completed by QDMA.
- * @param nb_cpls
- *   Number of completed DMA jobs.
- *
- * @return
- *   The number of operations that successfully completed. This return value
- *   must be less than or equal to the value of nb_cpls.
- */
-__rte_experimental
-uint16_t rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
-		struct rte_dpaa2_qdma_job **jobs, uint16_t nb_cpls);
+#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
 
+#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
+#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
 #endif /* _RTE_PMD_DPAA2_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/version.map b/drivers/dma/dpaa2/version.map
deleted file mode 100644
index 43e8b2d5c5..0000000000
--- a/drivers/dma/dpaa2/version.map
+++ /dev/null
@@ -1,13 +0,0 @@
-DPDK_25 {
-	local: *;
-};
-
-EXPERIMENTAL {
-	global:
-
-	# added in 22.07
-	rte_dpaa2_qdma_completed_multi;
-	rte_dpaa2_qdma_copy_multi;
-	rte_dpaa2_qdma_vchan_fd_us_enable;
-	rte_dpaa2_qdma_vchan_internal_sg_enable;
-};
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread
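
For context, a minimal usage sketch of the index-packing macros this patch
adds to rte_pmd_dpaa2_qdma.h. The helper name and the exact application flow
are assumptions for illustration only; the macros and the rte_dma_copy() API
are the ones from the sources above.

#include <rte_dmadev.h>
#include <rte_pmd_dpaa2_qdma.h>

/* Hypothetical helper: pack a user-chosen job index into the flags of
 * rte_dma_copy() so it can be matched against the index reported on
 * completion.
 */
static int
qdma_copy_with_idx_sketch(int16_t dev_id, uint16_t vchan,
	rte_iova_t src, rte_iova_t dst, uint32_t len, uint16_t job_idx)
{
	uint64_t flags = RTE_DPAA2_QDMA_COPY_SUBMIT(job_idx,
			RTE_DMA_OP_FLAG_SUBMIT);

	return rte_dma_copy(dev_id, vchan, src, dst, len, flags);
}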

* [v7 03/15] bus/fslmc: enhance the qbman dq storage logic
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
  2024-10-15  7:13                   ` [v7 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
  2024-10-15  7:13                   ` [v7 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 04/15] dma/dpaa2: add short FD support Gagandeep Singh
                                     ` (11 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Multiple DQ storages are used across multiple cores, and the single DQ
storage of the first union member is leaked if multiple storages are
allocated. It does not make sense to keep the single DQ storage in the
union, so remove it and reuse the first entry of the multiple-storage
array for that case.
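
A minimal sketch of the resulting per-lcore access pattern (the helper
name below is hypothetical; the queue and storage types are the ones
touched by this patch):

#include <rte_lcore.h>
#include "portal/dpaa2_hw_pvt.h"

static struct qbman_result *
dpaa2_rx_dq_storage_sketch(struct dpaa2_queue *dpaa2_q)
{
	struct queue_storage_info_t *q_storage;

	/* each polling lcore owns its own DQ storage entry, so the
	 * storages no longer alias through the old single-storage union
	 */
	q_storage = dpaa2_q->q_storage[rte_lcore_id()];

	return q_storage->dq_storage[0];
}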

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpci.c    | 25 ++-----
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  7 +-
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     | 38 +++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 23 ++----
 drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c |  4 +-
 drivers/dma/dpaa2/dpaa2_qdma.c              | 41 ++---------
 drivers/net/dpaa2/dpaa2_ethdev.c            | 81 ++++++++-------------
 drivers/net/dpaa2/dpaa2_rxtx.c              | 19 +++--
 drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c       |  4 +-
 9 files changed, 102 insertions(+), 140 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index 7e858a113f..160126f6d6 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -81,22 +81,10 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 		}
 
 		/* Allocate DQ storage for the DPCI Rx queues */
-		rxq = &(dpci_node->rx_queue[i]);
-		rxq->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_BUS_ERR("q_storage allocation failed");
-			ret = -ENOMEM;
+		rxq = &dpci_node->rx_queue[i];
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto err;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed");
-			goto err;
-		}
 	}
 
 	/* Enable the device */
@@ -141,12 +129,9 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
 
 err:
 	for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
-		struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+		struct dpaa2_queue *rxq = &dpci_node->rx_queue[i];
 
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 	rte_free(dpci_node);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 4aec7b2cd8..a8afc772fd 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -574,6 +574,7 @@ dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
 	}
 }
 
@@ -583,7 +584,7 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	int i = 0;
 
 	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
-		q_storage->dq_storage[i] = rte_malloc(NULL,
+		q_storage->dq_storage[i] = rte_zmalloc(NULL,
 			dpaa2_dqrr_size * sizeof(struct qbman_result),
 			RTE_CACHE_LINE_SIZE);
 		if (!q_storage->dq_storage[i])
@@ -591,8 +592,10 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
 	}
 	return 0;
 fail:
-	while (--i >= 0)
+	while (--i >= 0) {
 		rte_free(q_storage->dq_storage[i]);
+		q_storage->dq_storage[i] = NULL;
+	}
 
 	return -1;
 }
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 4c30e6db18..0e53ab9d8f 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -165,7 +165,9 @@ struct __rte_cache_aligned dpaa2_queue {
 	uint64_t tx_pkts;
 	uint64_t err_pkts;
 	union {
-		struct queue_storage_info_t *q_storage;
+		/**Ingress*/
+		struct queue_storage_info_t *q_storage[RTE_MAX_LCORE];
+		/**Egress*/
 		struct qbman_result *cscn;
 	};
 	struct rte_event ev;
@@ -187,6 +189,38 @@ struct swp_active_dqs {
 	uint64_t reserved[7];
 };
 
+#define dpaa2_queue_storage_alloc(q, num) \
+({ \
+	int ret = 0, i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		(q)->q_storage[i] = rte_zmalloc(NULL, \
+			sizeof(struct queue_storage_info_t), \
+			RTE_CACHE_LINE_SIZE); \
+		if (!(q)->q_storage[i]) { \
+			ret = -ENOBUFS; \
+			break; \
+		} \
+		ret = dpaa2_alloc_dq_storage((q)->q_storage[i]); \
+		if (ret) \
+			break; \
+	} \
+	ret; \
+})
+
+#define dpaa2_queue_storage_free(q, num) \
+({ \
+	int i; \
+	\
+	for (i = 0; i < (num); i++) { \
+		if ((q)->q_storage[i]) { \
+			dpaa2_free_dq_storage((q)->q_storage[i]); \
+			rte_free((q)->q_storage[i]); \
+			(q)->q_storage[i] = NULL; \
+		} \
+	} \
+})
+
 #define NUM_MAX_SWP 64
 
 extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index b34183d594..fda08ba0b1 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1932,7 +1932,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
@@ -2023,10 +2023,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (qp->rx_vq.q_storage) {
-		dpaa2_free_dq_storage(qp->rx_vq.q_storage);
-		rte_free(qp->rx_vq.q_storage);
-	}
+	dpaa2_queue_storage_free(&qp->rx_vq, 1);
 	rte_mempool_free(qp->fle_pool);
 	rte_free(qp);
 
@@ -2077,18 +2074,10 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp->rx_vq.crypto_data = dev->data;
 	qp->tx_vq.crypto_data = dev->data;
-	qp->rx_vq.q_storage = rte_malloc("sec dq storage",
-		sizeof(struct queue_storage_info_t),
-		RTE_CACHE_LINE_SIZE);
-	if (!qp->rx_vq.q_storage) {
-		DPAA2_SEC_ERR("malloc failed for q_storage");
-		return -ENOMEM;
-	}
-	memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
-
-	if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
-		DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-		return -ENOMEM;
+	retcode = dpaa2_queue_storage_alloc((&qp->rx_vq), 1);
+	if (retcode) {
+		dpaa2_queue_storage_free((&qp->rx_vq), 1);
+		return retcode;
 	}
 
 	dev->data->queue_pairs[qp_id] = qp;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 883584a6e2..fb0408f8ad 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2022 NXP
+ * Copyright 2021-2022, 2024 NXP
  */
 
 #include <cryptodev_pmd.h>
@@ -853,7 +853,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+	dq_storage = dpaa2_qp->rx_vq.q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_numframes(&pulldesc,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index e364cc8f3d..ee110741b7 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -854,7 +854,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		dpaa2_dqrr_size : nb_cpls;
 	rxq = &(dpdmai_dev->rx_queue[qdma_vq->vq_id]);
 	fqid = rxq->fqid;
-	q_storage = rxq->q_storage;
+	q_storage = rxq->q_storage[0];
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
@@ -1062,13 +1062,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 				qdma_dev->vqs[i].ring_cntx_idx = NULL;
 			}
 			rxq = &dpdmai_dev->rx_queue[i];
-			if (rxq->q_storage) {
-				DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
-					dev->data->dev_name, i);
-				dpaa2_free_dq_storage(rxq->q_storage);
-				rte_free(rxq->q_storage);
-				rxq->q_storage = NULL;
-			}
+			dpaa2_queue_storage_free(rxq, 1);
 		}
 		rte_free(qdma_dev->vqs);
 		qdma_dev->vqs = NULL;
@@ -1124,24 +1118,9 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 		qdma_dev->vqs[i].vq_id = i;
 		rxq = &dpdmai_dev->rx_queue[i];
 		/* Allocate DQ storage for the DPDMAI Rx queues */
-		rxq->q_storage = rte_zmalloc(NULL,
-			sizeof(struct queue_storage_info_t),
-			RTE_CACHE_LINE_SIZE);
-		if (!rxq->q_storage) {
-			DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
-			goto alloc_failed;
-		}
-
-		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
-		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
-		if (ret) {
-			DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
-				dev->data->dev_name, i);
-			ret = -ENOMEM;
+		ret = dpaa2_queue_storage_alloc(rxq, 1);
+		if (ret)
 			goto alloc_failed;
-		}
 	}
 
 	qdma_dev->num_vqs = dev_conf->nb_vchans;
@@ -1152,11 +1131,7 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
 alloc_failed:
 	for (i = 0; i < dev_conf->nb_vchans; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	rte_free(qdma_dev->vqs);
@@ -1350,11 +1325,7 @@ dpaa2_qdma_close(struct rte_dma_dev *dev)
 	/* Free RXQ storages */
 	for (i = 0; i < qdma_dev->num_vqs; i++) {
 		rxq = &dpdmai_dev->rx_queue[i];
-		if (rxq->q_storage) {
-			dpaa2_free_dq_storage(rxq->q_storage);
-			rte_free(rxq->q_storage);
-			rxq->q_storage = NULL;
-		}
+		dpaa2_queue_storage_free(rxq, 1);
 	}
 
 	if (qdma_dev->vqs) {
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 7b3e587a8d..93b88acef8 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -1,7 +1,7 @@
 /* * SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -366,7 +366,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	uint8_t num_rxqueue_per_tc;
 	struct dpaa2_queue *mc_q, *mcq;
 	uint32_t tot_queues;
-	int i;
+	int i, ret = 0;
 	struct dpaa2_queue *dpaa2_q;
 
 	PMD_INIT_FUNC_TRACE();
@@ -386,16 +386,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	for (i = 0; i < priv->nb_rx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		priv->rx_vq[i] = mc_q++;
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_q->q_storage = rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
-			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+		dpaa2_q = priv->rx_vq[i];
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
 	}
 
@@ -405,19 +399,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 		if (!priv->rx_err_vq)
 			goto fail;
 
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
-					sizeof(struct queue_storage_info_t) *
-					RTE_MAX_LCORE,
-					RTE_CACHE_LINE_SIZE);
-		if (!dpaa2_q->q_storage)
+		dpaa2_q = priv->rx_err_vq;
+		ret = dpaa2_queue_storage_alloc(dpaa2_q,
+			RTE_MAX_LCORE);
+		if (ret)
 			goto fail;
-
-		memset(dpaa2_q->q_storage, 0,
-		       sizeof(struct queue_storage_info_t));
-		for (i = 0; i < RTE_MAX_LCORE; i++)
-			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
-				goto fail;
 	}
 
 	for (i = 0; i < priv->nb_tx_queues; i++) {
@@ -438,24 +424,17 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			mc_q->tc_index = i;
 			mc_q->flow_id = 0;
 			priv->tx_conf_vq[i] = mc_q++;
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-			dpaa2_q->q_storage =
-				rte_malloc("dq_storage",
-					sizeof(struct queue_storage_info_t),
-					RTE_CACHE_LINE_SIZE);
-			if (!dpaa2_q->q_storage)
-				goto fail_tx_conf;
-
-			memset(dpaa2_q->q_storage, 0,
-			       sizeof(struct queue_storage_info_t));
-			if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+			dpaa2_q = priv->tx_conf_vq[i];
+			ret = dpaa2_queue_storage_alloc(dpaa2_q,
+					RTE_MAX_LCORE);
+			if (ret)
 				goto fail_tx_conf;
 		}
 	}
 
 	vq_id = 0;
 	for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) {
-		mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+		mcq = priv->rx_vq[vq_id];
 		mcq->tc_index = dist_idx / num_rxqueue_per_tc;
 		mcq->flow_id = dist_idx % num_rxqueue_per_tc;
 		vq_id++;
@@ -465,15 +444,15 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 fail_tx_conf:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i];
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->tx_conf_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->tx_conf_vq[i--] = NULL;
 	}
 	i = priv->nb_tx_queues;
 fail_tx:
 	i -= 1;
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+		dpaa2_q = priv->tx_vq[i];
 		rte_free(dpaa2_q->cscn);
 		priv->tx_vq[i--] = NULL;
 	}
@@ -482,17 +461,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 	i -= 1;
 	mc_q = priv->rx_vq[0];
 	while (i >= 0) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-		dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_vq[i];
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 		priv->rx_vq[i--] = NULL;
 	}
 
 	if (dpaa2_enable_err_queue) {
-		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
-		if (dpaa2_q->q_storage)
-			dpaa2_free_dq_storage(dpaa2_q->q_storage);
-		rte_free(dpaa2_q->q_storage);
+		dpaa2_q = priv->rx_err_vq;
+		dpaa2_queue_storage_free(dpaa2_q, RTE_MAX_LCORE);
 	}
 
 	rte_free(mc_q);
@@ -512,20 +488,21 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
 	if (priv->rx_vq[0]) {
 		/* cleaning up queue storage */
 		for (i = 0; i < priv->nb_rx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-			rte_free(dpaa2_q->q_storage);
+			dpaa2_q = priv->rx_vq[i];
+			dpaa2_queue_storage_free(dpaa2_q,
+				RTE_MAX_LCORE);
 		}
 		/* cleanup tx queue cscn */
 		for (i = 0; i < priv->nb_tx_queues; i++) {
-			dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i];
+			dpaa2_q = priv->tx_vq[i];
 			rte_free(dpaa2_q->cscn);
 		}
 		if (priv->flags & DPAA2_TX_CONF_ENABLE) {
 			/* cleanup tx conf queue storage */
 			for (i = 0; i < priv->nb_tx_queues; i++) {
-				dpaa2_q = (struct dpaa2_queue *)
-						priv->tx_conf_vq[i];
-				rte_free(dpaa2_q->q_storage);
+				dpaa2_q = priv->tx_conf_vq[i];
+				dpaa2_queue_storage_free(dpaa2_q,
+					RTE_MAX_LCORE);
 			}
 		}
 		/*free memory for all queues (RX+TX) */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 92e9dd40dc..376291af04 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016-2021 NXP
+ *   Copyright 2016-2024 NXP
  *
  */
 
@@ -647,7 +647,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q)
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
 
-	dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+	dq_storage = dpaa2_q->q_storage[lcore_id]->dq_storage[0];
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -716,7 +716,7 @@ uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, pull_size;
@@ -724,10 +724,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd;
 	struct qbman_pull_desc pulldesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
+
 	if (unlikely(dpaa2_enable_err_queue))
 		dump_err_pkts(priv->rx_err_vq);
 
@@ -958,7 +960,7 @@ uint16_t
 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ */
-	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+	struct dpaa2_queue *dpaa2_q = queue;
 	struct qbman_result *dq_storage;
 	uint32_t fqid = dpaa2_q->fqid;
 	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
@@ -984,7 +986,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1115,7 +1117,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
 	swp = DPAA2_PER_LCORE_PORTAL;
 
 	do {
-		dq_storage = dpaa2_q->q_storage->dq_storage[0];
+		dq_storage = dpaa2_q->q_storage[0]->dq_storage[0];
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
@@ -1954,12 +1956,13 @@ dpaa2_dev_loopback_rx(void *queue,
 	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
 	struct qbman_pull_desc pulldesc;
 	struct qbman_eq_desc eqdesc;
-	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
+	struct queue_storage_info_t *q_storage;
 	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
 	struct dpaa2_dev_priv *priv = eth_data->dev_private;
 	struct dpaa2_queue *tx_q = priv->tx_vq[0];
 	/* todo - currently we are using 1st TX queue only for loopback*/
 
+	q_storage = dpaa2_q->q_storage[rte_lcore_id()];
 	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
 		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
index de8c024abb..34a3c4f6af 100644
--- a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2019 NXP
+ * Copyright 2018-2019, 2024 NXP
  */
 
 #include <stdio.h>
@@ -142,7 +142,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
 
 	cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
 	rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
-	dq_storage = rxq->q_storage->dq_storage[0];
+	dq_storage = rxq->q_storage[0]->dq_storage[0];
 
 	qbman_pull_desc_clear(&pulldesc);
 	qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 04/15] dma/dpaa2: add short FD support
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (2 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
                                     ` (10 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Short FD can be used for the single-transfer scenario, where it shows
higher performance than the FLE format.
1) Save the index context in the FD att field for short and FLE (non-SG).
2) Identify the FD type from the FD att field (see the sketch below).
3) Force 48-bit source and FLE addresses, as required by the spec.
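
A simplified sketch of the att encoding used by 1) and 2), matching the
dpaa2_qdma_fd_save_att()/dpaa2_qdma_fd_get_att() helpers added in this
patch (the function name below is illustrative only):

#include "dpaa2_qdma.h"

/* att layout: bits 14..13 hold the FD type, bits 12..0 the job index */
static inline uint16_t
qdma_att_pack_sketch(uint16_t job_idx, enum dpaa2_qdma_fd_type type)
{
	return job_idx | (uint16_t)(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
}

/* on dequeue, DPAA2_QDMA_FD_ATT_TYPE(att) and DPAA2_QDMA_FD_ATT_CNTX(att)
 * recover the FD type and the job index respectively
 */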

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/guides/dmadevs/dpaa2.rst           |   2 +
 drivers/dma/dpaa2/dpaa2_qdma.c         | 314 +++++++++++++++++++------
 drivers/dma/dpaa2/dpaa2_qdma.h         |  69 ++++--
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h |  13 -
 4 files changed, 286 insertions(+), 112 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa2.rst b/doc/guides/dmadevs/dpaa2.rst
index eeeb5d52a8..6ebf7ac030 100644
--- a/doc/guides/dmadevs/dpaa2.rst
+++ b/doc/guides/dmadevs/dpaa2.rst
@@ -81,3 +81,5 @@ Device Arguments
   usage example: ``fslmc:dpdmai.1,fle_pre_populate=1``
 * Use dev arg option ``desc_debug=1`` to enable descriptor debugs.
   usage example: ``fslmc:dpdmai.1,desc_debug=1``
+* Use dev arg option ``short_fd=1`` to enable short FDs.
+  usage example: ``fslmc:dpdmai.1,short_fd=1``
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index ee110741b7..df52d2d6b3 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -16,6 +16,7 @@
 
 #define DPAA2_QDMA_FLE_PRE_POPULATE "fle_pre_populate"
 #define DPAA2_QDMA_DESC_DEBUG "desc_debug"
+#define DPAA2_QDMA_USING_SHORT_FD "short_fd"
 
 static uint32_t dpaa2_coherent_no_alloc_cache;
 static uint32_t dpaa2_coherent_alloc_cache;
@@ -552,7 +553,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 	const struct qdma_cntx_fle_sdd *fle_sdd;
 	const struct qdma_sdd *sdd;
 	const struct qdma_cntx_sg *cntx_sg = NULL;
-	const struct qdma_cntx_long *cntx_long = NULL;
 
 	fle_sdd = container_of(fle, const struct qdma_cntx_fle_sdd, fle[0]);
 	sdd = fle_sdd->sdd;
@@ -575,11 +575,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		QBMAN_FLE_WORD4_FMT_SGE) {
 		cntx_sg = container_of(fle_sdd, const struct qdma_cntx_sg,
 			fle_sdd);
-	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
+	} else if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt !=
 		QBMAN_FLE_WORD4_FMT_SBF) {
-		cntx_long = container_of(fle_sdd, const struct qdma_cntx_long,
-			fle_sdd);
-	} else {
 		DPAA2_QDMA_ERR("Unsupported fle format:%d",
 			fle[DPAA2_QDMA_SRC_FLE].word4.fmt);
 		return;
@@ -590,11 +587,6 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		dpaa2_qdma_sdd_dump(&sdd[i]);
 	}
 
-	if (cntx_long) {
-		DPAA2_QDMA_INFO("long format/Single buffer cntx idx:%d",
-			cntx_long->cntx_idx);
-	}
-
 	if (cntx_sg) {
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
@@ -612,6 +604,8 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 			DPAA2_QDMA_INFO("cntx_idx[%d]:%d", i,
 				cntx_sg->cntx_idx[i]);
 		}
+	} else {
+		DPAA2_QDMA_INFO("long format/Single buffer cntx");
 	}
 }
 
@@ -674,7 +668,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		offsetof(struct qdma_cntx_sg, fle_sdd) +
 		offsetof(struct qdma_cntx_fle_sdd, fle);
 
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_sg);
 
@@ -710,6 +704,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
 		dpaa2_qdma_long_fmt_dump(cntx_sg->fle_sdd.fle);
 
+	dpaa2_qdma_fd_save_att(fd, 0, DPAA2_QDMA_FD_SG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -726,74 +721,178 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static inline void
+qdma_populate_fd_pci(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd,
+	struct dpaa2_qdma_rbp *rbp, int ser)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits(src);
+	fd->simple_pci.saddr_hi = upper_32_bits(src);
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_pci.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+
+	fd->simple_pci.sportid = rbp->sportid;
+
+	fd->simple_pci.svfid = rbp->svfid;
+	fd->simple_pci.spfid = rbp->spfid;
+	fd->simple_pci.svfa = rbp->svfa;
+	fd->simple_pci.dvfid = rbp->dvfid;
+	fd->simple_pci.dpfid = rbp->dpfid;
+	fd->simple_pci.dvfa = rbp->dvfa;
+
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits(dest);
+	fd->simple_pci.daddr_hi = upper_32_bits(dest);
+}
+
+static inline void
+qdma_populate_fd_ddr(uint64_t src, uint64_t dest,
+	uint32_t len, struct qbman_fd *fd, int ser)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(src);
+	fd->simple_ddr.saddr_hi = upper_32_bits(src);
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = DPAA2_QDMA_BMT_DISABLE;
+	fd->simple_ddr.fmt = DPAA2_QDMA_FD_SHORT_FORMAT;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = ser;
+	if (ser)
+		fd->simple.frc |= QDMA_SER_CTX;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss.
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits(dest);
+	fd->simple_ddr.daddr_hi = upper_32_bits(dest);
+}
+
 static int
-dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
-	rte_iova_t src, rte_iova_t dst,
-	uint32_t length, uint64_t flags)
+dpaa2_qdma_short_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
 {
-	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
-	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
-	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
 	int ret = 0, expected;
 	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
-	struct qdma_cntx_long *cntx_long = NULL;
-	rte_iova_t cntx_iova, fle_iova, sdd_iova;
+
+	memset(fd, 0, sizeof(struct qbman_fd));
+
+	if (qdma_vq->rbp.drbp || qdma_vq->rbp.srbp) {
+		/** PCIe EP*/
+		qdma_populate_fd_pci(src,
+			dst, length,
+			fd, &qdma_vq->rbp,
+			is_silent ? 0 : 1);
+	} else {
+		/** DDR or PCIe RC*/
+		qdma_populate_fd_ddr(src,
+			dst, length,
+			fd, is_silent ? 0 : 1);
+	}
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_SHORT);
+	qdma_vq->fd_idx++;
+
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
+		expected = qdma_vq->fd_idx;
+		ret = dpaa2_qdma_multi_eq(qdma_vq);
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
+	} else {
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
+	}
+
+	return ret;
+}
+
+static int
+dpaa2_qdma_long_copy(struct qdma_virt_queue *qdma_vq,
+	rte_iova_t src, rte_iova_t dst, uint32_t length,
+	int is_silent, uint64_t flags)
+{
+	int ret = 0, expected;
+	struct qbman_fd *fd = &qdma_vq->fd[qdma_vq->fd_idx];
+	struct qdma_cntx_fle_sdd *fle_sdd = NULL;
+	rte_iova_t fle_iova, sdd_iova;
 	struct qbman_fle *fle;
 	struct qdma_sdd *sdd;
 
 	memset(fd, 0, sizeof(struct qbman_fd));
 
-	if (qdma_dev->is_silent) {
-		cntx_long = qdma_vq->cntx_long[qdma_vq->silent_idx];
+	if (is_silent) {
+		fle_sdd = qdma_vq->cntx_fle_sdd[qdma_vq->silent_idx];
 	} else {
 		ret = rte_mempool_get(qdma_vq->fle_pool,
-			(void **)&cntx_long);
+			(void **)&fle_sdd);
 		if (ret)
 			return ret;
 		DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-		cntx_long->cntx_idx = DPAA2_QDMA_IDX_FROM_FLAG(flags);
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_long);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_long);
-#endif
+	fle = fle_sdd->fle;
+	fle_iova = (uint64_t)fle - qdma_vq->fle_iova2va_offset;
 
-	fle = cntx_long->fle_sdd.fle;
-	fle_iova = cntx_iova +
-		offsetof(struct qdma_cntx_long, fle_sdd) +
-		offsetof(struct qdma_cntx_fle_sdd, fle);
-
-	DPAA2_SET_FD_ADDR(fd, fle_iova);
+	dpaa2_qdma_fd_set_addr(fd, fle_iova);
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FLC(fd, (uint64_t)cntx_long);
+	DPAA2_SET_FD_FLC(fd, (uint64_t)fle);
 
 	if (qdma_vq->fle_pre_populate) {
 		if (unlikely(!fle[DPAA2_QDMA_SRC_FLE].length)) {
-			fle_sdd_pre_populate(&cntx_long->fle_sdd,
+			fle_sdd_pre_populate(fle_sdd,
 				&qdma_vq->rbp,
 				0, 0, QBMAN_FLE_WORD4_FMT_SBF);
-			if (!qdma_dev->is_silent && cntx_long) {
-				cntx_long->cntx_idx =
-					DPAA2_QDMA_IDX_FROM_FLAG(flags);
-			}
 		}
 
 		fle_post_populate(fle, src, dst, length);
 	} else {
-		sdd = cntx_long->fle_sdd.sdd;
-		sdd_iova = cntx_iova +
-			offsetof(struct qdma_cntx_long, fle_sdd) +
-			offsetof(struct qdma_cntx_fle_sdd, sdd);
+		sdd = fle_sdd->sdd;
+		sdd_iova = (uint64_t)sdd - qdma_vq->fle_iova2va_offset;
 		fle_populate(fle, sdd, sdd_iova, &qdma_vq->rbp,
 			src, dst, length,
 			QBMAN_FLE_WORD4_FMT_SBF);
 	}
 
 	if (unlikely(qdma_vq->flags & DPAA2_QDMA_DESC_DEBUG_FLAG))
-		dpaa2_qdma_long_fmt_dump(cntx_long->fle_sdd.fle);
+		dpaa2_qdma_long_fmt_dump(fle);
 
+	dpaa2_qdma_fd_save_att(fd, DPAA2_QDMA_IDX_FROM_FLAG(flags),
+		DPAA2_QDMA_FD_LONG);
 	qdma_vq->fd_idx++;
 	qdma_vq->silent_idx =
 		(qdma_vq->silent_idx + 1) & (DPAA2_QDMA_MAX_DESC - 1);
@@ -801,15 +900,89 @@ dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num++;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num++;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
 }
 
+static int
+dpaa2_qdma_copy(void *dev_private, uint16_t vchan,
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
+
+	if (qdma_vq->using_short_fd)
+		return dpaa2_qdma_short_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+	else
+		return dpaa2_qdma_long_copy(qdma_vq, src, dst,
+				length, qdma_dev->is_silent, flags);
+}
+
+static inline int
+dpaa2_qdma_dq_fd(const struct qbman_fd *fd,
+	struct qdma_virt_queue *qdma_vq,
+	uint16_t *free_space, uint16_t *fle_elem_nb)
+{
+	uint16_t idx, att;
+	enum dpaa2_qdma_fd_type type;
+	int ret;
+	struct qdma_cntx_sg *cntx_sg;
+	struct qdma_cntx_fle_sdd *fle_sdd;
+
+	att = dpaa2_qdma_fd_get_att(fd);
+	type = DPAA2_QDMA_FD_ATT_TYPE(att);
+	if (type == DPAA2_QDMA_FD_SHORT) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_LONG) {
+		idx = DPAA2_QDMA_FD_ATT_CNTX(att);
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				&idx, 1, free_space);
+		if (unlikely(ret != 1))
+			return -ENOSPC;
+
+		return 0;
+	}
+	if (type == DPAA2_QDMA_FD_SG) {
+		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
+		qdma_vq->fle_elem[*fle_elem_nb] = fle_sdd;
+		(*fle_elem_nb)++;
+		cntx_sg = container_of(fle_sdd,
+				struct qdma_cntx_sg, fle_sdd);
+		ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
+				cntx_sg->cntx_idx,
+				cntx_sg->job_nb, free_space);
+		if (unlikely(ret < cntx_sg->job_nb))
+			return -ENOSPC;
+
+		return 0;
+	}
+
+	DPAA2_QDMA_ERR("Invalid FD type, ATT=0x%04x",
+		fd->simple_ddr.rsv1_att);
+	return -EIO;
+}
+
 static uint16_t
 dpaa2_qdma_dequeue(void *dev_private,
 	uint16_t vchan, const uint16_t nb_cpls,
@@ -829,10 +1002,6 @@ dpaa2_qdma_dequeue(void *dev_private,
 	uint8_t num_rx = 0;
 	const struct qbman_fd *fd;
 	int ret, pull_size;
-	struct qbman_fle *fle;
-	struct qdma_cntx_fle_sdd *fle_sdd;
-	struct qdma_cntx_sg *cntx_sg;
-	struct qdma_cntx_long *cntx_long;
 	uint16_t free_space = 0, fle_elem_nb = 0;
 
 	if (unlikely(qdma_dev->is_silent))
@@ -931,25 +1100,8 @@ dpaa2_qdma_dequeue(void *dev_private,
 				continue;
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
-		fle_sdd = (void *)(uintptr_t)DPAA2_GET_FD_FLC(fd);
-		fle = fle_sdd->fle;
-		qdma_vq->fle_elem[fle_elem_nb] = fle_sdd;
-		fle_elem_nb++;
-		if (fle[DPAA2_QDMA_SRC_FLE].word4.fmt ==
-			QBMAN_FLE_WORD4_FMT_SGE) {
-			cntx_sg = container_of(fle_sdd,
-				struct qdma_cntx_sg, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				cntx_sg->cntx_idx,
-				cntx_sg->job_nb, &free_space);
-		} else {
-			cntx_long = container_of(fle_sdd,
-				struct qdma_cntx_long, fle_sdd);
-			ret = qdma_cntx_idx_ring_eq(qdma_vq->ring_cntx_idx,
-				&cntx_long->cntx_idx,
-				1, &free_space);
-		}
-		if (!ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
+		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -974,8 +1126,10 @@ dpaa2_qdma_dequeue(void *dev_private,
 	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
 
-	rte_mempool_put_bulk(qdma_vq->fle_pool,
-		qdma_vq->fle_elem, fle_elem_nb);
+	if (fle_elem_nb > 0) {
+		rte_mempool_put_bulk(qdma_vq->fle_pool,
+			qdma_vq->fle_elem, fle_elem_nb);
+	}
 
 	num_rx = qdma_cntx_idx_ring_dq(qdma_vq->ring_cntx_idx,
 		cntx_idx, nb_cpls);
@@ -1204,11 +1358,14 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	else
 		qdma_dev->vqs[vchan].flags &= (~DPAA2_QDMA_DESC_DEBUG_FLAG);
 
+	if (dpaa2_qdma_get_devargs(dev->device->devargs, DPAA2_QDMA_USING_SHORT_FD))
+		qdma_dev->vqs[vchan].using_short_fd = 1;
+	else
+		qdma_dev->vqs[vchan].using_short_fd = 0;
+
 	snprintf(pool_name, sizeof(pool_name),
 		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
-	pool_size = RTE_MAX(sizeof(struct qdma_cntx_sg),
-			    sizeof(struct qdma_cntx_long));
-
+	pool_size = sizeof(struct qdma_cntx_sg);
 	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
 			DPAA2_QDMA_MAX_DESC * 2, pool_size,
 			512, 0, NULL, NULL, NULL, NULL,
@@ -1228,7 +1385,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 			return ret;
 		}
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
-				(void **)qdma_dev->vqs[vchan].cntx_long,
+				(void **)qdma_dev->vqs[vchan].cntx_fle_sdd,
 				DPAA2_QDMA_MAX_DESC);
 		if (ret) {
 			DPAA2_QDMA_ERR("long cntx get from %s for silent mode",
@@ -1595,5 +1752,6 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
 	DPAA2_QDMA_FLE_PRE_POPULATE "=<int>"
-	DPAA2_QDMA_DESC_DEBUG"=<int>");
+	DPAA2_QDMA_DESC_DEBUG"=<int>"
+	DPAA2_QDMA_USING_SHORT_FD"=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 371393cb85..0be65e1cc6 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2023 NXP
+ * Copyright 2018-2024 NXP
  */
 
 #ifndef _DPAA2_QDMA_H_
@@ -12,17 +12,8 @@
 #define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
-#define DPAA2_QDMA_VQ_FD_SHORT_FORMAT		(1ULL << 0)
-#define DPAA2_QDMA_VQ_FD_SG_FORMAT		(1ULL << 1)
-#define DPAA2_QDMA_VQ_NO_RESPONSE		(1ULL << 2)
-
 #define DPAA2_DPDMAI_MAX_QUEUES	16
 
-#define QDMA_JOB_HW_CNTX_IDX (RTE_DPAA2_QDMA_JOB_USR_CNTX_IDX + 1)
-
-/** FLE pool cache size */
-#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
-
 /** Notification by FQD_CTX[fqid] */
 #define QDMA_SER_CTX (1 << 8)
 #define DPAA2_RBP_MEM_RW            0x0
@@ -40,9 +31,14 @@
 #define DPAA2_LX2_COHERENT_ALLOCATE_CACHE	0xb
 
 /** Maximum possible H/W Queues on each core */
-#define MAX_HW_QUEUE_PER_CORE		64
+#define MAX_HW_QUEUE_PER_CORE 64
+
+#define DPAA2_QDMA_FD_FLUSH_FORMAT 0x0
+#define DPAA2_QDMA_FD_LONG_FORMAT 0x1
+#define DPAA2_QDMA_FD_SHORT_FORMAT 0x3
 
-#define QDMA_RBP_UPPER_ADDRESS_MASK (0xfff0000000000)
+#define DPAA2_QDMA_BMT_ENABLE 0x1
+#define DPAA2_QDMA_BMT_DISABLE 0x0
 
 /** Source/Destination Descriptor */
 struct qdma_sdd {
@@ -99,8 +95,8 @@ struct qdma_sdd {
 #define QDMA_SG_SL_SHORT	0x1 /* short length */
 #define QDMA_SG_SL_LONG	0x0 /* long length */
 #define QDMA_SG_F	0x1 /* last sg entry */
-#define QDMA_SG_BMT_ENABLE 0x1
-#define QDMA_SG_BMT_DISABLE 0x0
+#define QDMA_SG_BMT_ENABLE DPAA2_QDMA_BMT_ENABLE
+#define QDMA_SG_BMT_DISABLE DPAA2_QDMA_BMT_DISABLE
 
 struct qdma_sg_entry {
 	uint32_t addr_lo;		/* address 0:31 */
@@ -166,6 +162,40 @@ struct dpaa2_qdma_rbp {
 	uint32_t rsv:2;
 };
 
+enum dpaa2_qdma_fd_type {
+	DPAA2_QDMA_FD_SHORT = 1,
+	DPAA2_QDMA_FD_LONG = 2,
+	DPAA2_QDMA_FD_SG = 3
+};
+
+#define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_TYPE(att) \
+	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
+#define DPAA2_QDMA_FD_ATT_CNTX(att) \
+	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+
+static inline void
+dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
+	uint64_t addr)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits(addr);
+	fd->simple_ddr.saddr_hi = upper_32_bits(addr);
+}
+
+static inline void
+dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
+	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
+{
+	fd->simple_ddr.rsv1_att = job_idx |
+		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
+}
+
+static inline uint16_t
+dpaa2_qdma_fd_get_att(const struct qbman_fd *fd)
+{
+	return fd->simple_ddr.rsv1_att;
+}
+
 enum {
 	DPAA2_QDMA_SDD_FLE,
 	DPAA2_QDMA_SRC_FLE,
@@ -193,12 +223,6 @@ struct qdma_cntx_sg {
 	uint16_t rsv[3];
 } __rte_packed;
 
-struct qdma_cntx_long {
-	struct qdma_cntx_fle_sdd fle_sdd;
-	uint16_t cntx_idx;
-	uint16_t rsv[3];
-} __rte_packed;
-
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
 
@@ -241,6 +265,7 @@ struct qdma_virt_queue {
 	struct dpaa2_dpdmai_dev *dpdmai_dev;
 	/** FLE pool for the queue */
 	struct rte_mempool *fle_pool;
+	uint64_t fle_iova2va_offset;
 	void **fle_elem;
 	/** Route by port */
 	struct dpaa2_qdma_rbp rbp;
@@ -252,6 +277,7 @@ struct qdma_virt_queue {
 	uint64_t num_enqueues;
 	/* Total number of dequeues from this VQ */
 	uint64_t num_dequeues;
+	uint64_t copy_num;
 
 	uint16_t vq_id;
 	uint32_t flags;
@@ -261,10 +287,11 @@ struct qdma_virt_queue {
 
 	/**Used for silent enabled*/
 	struct qdma_cntx_sg *cntx_sg[DPAA2_QDMA_MAX_DESC];
-	struct qdma_cntx_long *cntx_long[DPAA2_QDMA_MAX_DESC];
+	struct qdma_cntx_fle_sdd *cntx_fle_sdd[DPAA2_QDMA_MAX_DESC];
 	uint16_t silent_idx;
 
 	int num_valid_jobs;
+	int using_short_fd;
 
 	struct rte_dma_stats stats;
 };
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
index e49604c8fc..df21b39cae 100644
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
@@ -7,19 +7,6 @@
 
 #include <rte_compat.h>
 
-#define RTE_DPAA2_QDMA_IDX_SHIFT_POS 20
-#define RTE_DPAA2_QDMA_LEN_MASK \
-	(~((~0u) << RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_IDX_LEN(idx, len) \
-	((uint32_t)((idx << RTE_DPAA2_QDMA_IDX_SHIFT_POS) | (len & RTE_DPAA2_QDMA_LEN_MASK)))
-
-#define RTE_DPAA2_QDMA_IDX_FROM_LENGTH(length) \
-	((uint16_t)((length) >> RTE_DPAA2_QDMA_IDX_SHIFT_POS))
-
-#define RTE_DPAA2_QDMA_LEN_FROM_LENGTH(length) \
-	((length) & RTE_DPAA2_QDMA_LEN_MASK)
-
 #define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
 #define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
 	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 05/15] dma/dpaa2: limit the max descriptor number
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (3 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 04/15] dma/dpaa2: add short FD support Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
                                     ` (9 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

For the non-SG format, the index is saved in the FD within a field
that is DPAA2_QDMA_FD_ATT_TYPE_OFFSET (13) bits wide.

The maximum descriptor number of the ring must be a power of 2, so
the eventual maximum is:
((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) / 2)
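
A minimal sketch of the packing this limit follows from (the helper
and macro names below are illustrative, not the driver's symbols):
the FD "att" field holds a 2-bit FD type above a 13-bit job index,
so the index can be at most 8191, and with a power-of-2 ring size the
ring holds at most 4096 descriptors.

#include <stdint.h>

#define ATT_TYPE_OFFSET 13                       /* same value as DPAA2_QDMA_FD_ATT_TYPE_OFFSET */
#define ATT_MAX_IDX ((1 << ATT_TYPE_OFFSET) - 1) /* 8191 */

static inline uint16_t
att_pack(uint16_t job_idx, uint16_t fd_type)
{
	/* job_idx must fit in 13 bits */
	return (uint16_t)(job_idx | (fd_type << ATT_TYPE_OFFSET));
}

static inline uint16_t
att_idx(uint16_t att)
{
	return att & ATT_MAX_IDX;        /* recover the job index */
}

static inline uint16_t
att_type(uint16_t att)
{
	return att >> ATT_TYPE_OFFSET;   /* recover the FD type */
}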

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.h | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 0be65e1cc6..250c83c83c 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -8,8 +8,6 @@
 #include "portal/dpaa2_hw_pvt.h"
 #include "portal/dpaa2_hw_dpio.h"
 
-#define DPAA2_QDMA_MAX_DESC		4096
-#define DPAA2_QDMA_MIN_DESC		1
 #define DPAA2_QDMA_MAX_VHANS		64
 
 #define DPAA2_DPDMAI_MAX_QUEUES	16
@@ -169,10 +167,15 @@ enum dpaa2_qdma_fd_type {
 };
 
 #define DPAA2_QDMA_FD_ATT_TYPE_OFFSET 13
+#define DPAA2_QDMA_FD_ATT_MAX_IDX \
+	((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1)
 #define DPAA2_QDMA_FD_ATT_TYPE(att) \
 	(att >> DPAA2_QDMA_FD_ATT_TYPE_OFFSET)
 #define DPAA2_QDMA_FD_ATT_CNTX(att) \
-	(att & ((1 << DPAA2_QDMA_FD_ATT_TYPE_OFFSET) - 1))
+	(att & DPAA2_QDMA_FD_ATT_MAX_IDX)
+
+#define DPAA2_QDMA_MAX_DESC ((DPAA2_QDMA_FD_ATT_MAX_IDX + 1) / 2)
+#define DPAA2_QDMA_MIN_DESC 1
 
 static inline void
 dpaa2_qdma_fd_set_addr(struct qbman_fd *fd,
@@ -186,6 +189,7 @@ static inline void
 dpaa2_qdma_fd_save_att(struct qbman_fd *fd,
 	uint16_t job_idx, enum dpaa2_qdma_fd_type type)
 {
+	RTE_ASSERT(job_idx <= DPAA2_QDMA_FD_ATT_MAX_IDX);
 	fd->simple_ddr.rsv1_att = job_idx |
 		(type << DPAA2_QDMA_FD_ATT_TYPE_OFFSET);
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 06/15] dma/dpaa2: change the DMA copy return value
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (4 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
                                     ` (8 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

The return value of the DMA copy/SG copy operations should be the
index of the enqueued descriptor on success.
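
A minimal caller-side sketch of how that index can be used with the
generic dmadev API (dev_id, vchan, the IOVAs and the busy-poll loop
are placeholders for illustration only):

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

static int
wait_one_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src_iova,
	rte_iova_t dst_iova, uint32_t len)
{
	uint16_t idx, last_idx, nb;
	bool has_error = false;
	int ret;

	ret = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
			RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;              /* enqueue failed */
	idx = (uint16_t)ret;             /* index of this copy op */

	do {
		nb = rte_dma_completed(dev_id, vchan, 1, &last_idx,
				&has_error);
	} while (nb == 0 && !has_error);

	if (has_error)
		return -EIO;

	return (last_idx == idx) ? 0 : -EAGAIN;
}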

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/dma/dpaa2/dpaa2_qdma.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index df52d2d6b3..5e7640ae08 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -636,6 +636,11 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -ENOTSUP;
 	}
 
+	if (unlikely(!nb_src)) {
+		DPAA2_QDMA_ERR("No SG entry specified");
+		return -EINVAL;
+	}
+
 	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
 			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
@@ -712,10 +717,13 @@ dpaa2_qdma_copy_sg(void *dev_private,
 	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
 		expected = qdma_vq->fd_idx;
 		ret = dpaa2_qdma_multi_eq(qdma_vq);
-		if (likely(ret == expected))
-			return 0;
+		if (likely(ret == expected)) {
+			qdma_vq->copy_num += nb_src;
+			return (qdma_vq->copy_num - 1) & UINT16_MAX;
+		}
 	} else {
-		return 0;
+		qdma_vq->copy_num += nb_src;
+		return (qdma_vq->copy_num - 1) & UINT16_MAX;
 	}
 
 	return ret;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 07/15] dma/dpaa2: move the qdma header to common place
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (5 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 08/15] dma/dpaa: refactor driver Gagandeep Singh
                                     ` (7 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Include rte_pmd_dpaax_qdma.h instead of rte_pmd_dpaa2_qdma.h
and change code accordingly.
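
For reference, application code that used the old driver-specific
header would now build against the common one; a rough sketch using
the macro names introduced in this patch (job_idx and the choice of
op flag are placeholders):

#include <stdint.h>
#include <rte_dmadev.h>
#include <rte_pmd_dpaax_qdma.h>	/* was: rte_pmd_dpaa2_qdma.h */

static uint64_t
build_copy_flags(uint16_t job_idx)
{
	/* Encode the application's job index into the per-op flags,
	 * usable when the device reports
	 * RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX.
	 */
	return RTE_DPAAX_QDMA_COPY_SUBMIT(job_idx, RTE_DMA_OP_FLAG_SUBMIT);
}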

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 doc/api/doxy-api-index.md                 |  2 +-
 doc/api/doxy-api.conf.in                  |  2 +-
 drivers/common/dpaax/meson.build          |  3 +-
 drivers/common/dpaax/rte_pmd_dpaax_qdma.h | 23 +++++++
 drivers/dma/dpaa2/dpaa2_qdma.c            | 84 +++++++++++------------
 drivers/dma/dpaa2/dpaa2_qdma.h            | 10 +--
 drivers/dma/dpaa2/meson.build             |  4 +-
 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h    | 23 -------
 8 files changed, 72 insertions(+), 79 deletions(-)
 create mode 100644 drivers/common/dpaax/rte_pmd_dpaax_qdma.h
 delete mode 100644 drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index abd44b1861..53e963ed47 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -57,7 +57,7 @@ The public API headers are grouped by topics:
   [mlx5](@ref rte_pmd_mlx5.h),
   [dpaa2_mempool](@ref rte_dpaa2_mempool.h),
   [dpaa2_cmdif](@ref rte_pmd_dpaa2_cmdif.h),
-  [dpaa2_qdma](@ref rte_pmd_dpaa2_qdma.h),
+  [dpaax](@ref rte_pmd_dpaax_qdma.h),
   [crypto_scheduler](@ref rte_cryptodev_scheduler.h),
   [dlb2](@ref rte_pmd_dlb2.h),
   [ifpga](@ref rte_pmd_ifpga.h)
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index a8823c046f..33250d867c 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -8,7 +8,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/drivers/bus/vdev \
                           @TOPDIR@/drivers/crypto/cnxk \
                           @TOPDIR@/drivers/crypto/scheduler \
-                          @TOPDIR@/drivers/dma/dpaa2 \
+                          @TOPDIR@/drivers/common/dpaax \
                           @TOPDIR@/drivers/event/dlb2 \
                           @TOPDIR@/drivers/event/cnxk \
                           @TOPDIR@/drivers/mempool/cnxk \
diff --git a/drivers/common/dpaax/meson.build b/drivers/common/dpaax/meson.build
index a162779116..db61b76ce3 100644
--- a/drivers/common/dpaax/meson.build
+++ b/drivers/common/dpaax/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2018 NXP
+# Copyright 2018, 2024 NXP
 
 if not is_linux
     build = false
@@ -16,3 +16,4 @@ endif
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
+headers = files('rte_pmd_dpaax_qdma.h')
diff --git a/drivers/common/dpaax/rte_pmd_dpaax_qdma.h b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
new file mode 100644
index 0000000000..2552a4adfb
--- /dev/null
+++ b/drivers/common/dpaax/rte_pmd_dpaax_qdma.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021-2024 NXP
+ */
+
+#ifndef _RTE_PMD_DPAAX_QDMA_H_
+#define _RTE_PMD_DPAAX_QDMA_H_
+
+#include <rte_compat.h>
+
+#define RTE_DPAAX_QDMA_COPY_IDX_OFFSET 8
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
+#define RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK \
+	(RTE_DPAAX_QDMA_SG_IDX_ADDR_ALIGN - 1)
+#define RTE_DPAAX_QDMA_SG_SUBMIT(idx_addr, flag) \
+	(((uint64_t)idx_addr) | (flag))
+
+#define RTE_DPAAX_QDMA_COPY_SUBMIT(idx, flag) \
+	((idx << RTE_DPAAX_QDMA_COPY_IDX_OFFSET) | (flag))
+
+#define RTE_DPAAX_QDMA_JOB_SUBMIT_MAX 64
+#define RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#endif /* _RTE_PMD_DPAAX_QDMA_H_ */
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 5e7640ae08..71e9ffdfc1 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,7 +10,7 @@
 
 #include <mc/fsl_dpdmai.h>
 
-#include "rte_pmd_dpaa2_qdma.h"
+#include <rte_pmd_dpaax_qdma.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
 
@@ -243,16 +243,16 @@ fle_sdd_pre_populate(struct qdma_cntx_fle_sdd *fle_sdd,
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 *
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dest);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 
 	/* Final bit: 1, for last frame list */
@@ -266,23 +266,21 @@ sg_entry_pre_populate(struct qdma_cntx_sg *sg_cntx)
 	struct qdma_sg_entry *src_sge = sg_cntx->sg_src_entry;
 	struct qdma_sg_entry *dst_sge = sg_cntx->sg_dst_entry;
 
-	for (i = 0; i < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX; i++) {
+	for (i = 0; i < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX; i++) {
 		/* source SG */
 		src_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		/* destination SG */
 		dst_sge[i].ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge[i].ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge[i].ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 	}
 }
 
@@ -381,21 +379,19 @@ sg_entry_populate(const struct rte_dma_sge *src,
 		src_sge->data_len.data_len_sl0 = src[i].length;
 		src_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		dst_sge->addr_lo = (uint32_t)dst[i].addr;
 		dst_sge->addr_hi = (dst[i].addr >> 32);
 		dst_sge->data_len.data_len_sl0 = dst[i].length;
 		dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
 		dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-		dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
-#else
+		/** IOMMU is always on for either VA or PA mode,
+		 * so Bypass Memory Translation should be disabled.
+		 */
 		dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
-#endif
 		total_len += src[i].length;
 
 		if (i == (nb_sge - 1)) {
@@ -475,17 +471,16 @@ fle_populate(struct qbman_fle fle[],
 	}
 	/* source frame list to source buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_SRC_FLE], src_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
-#endif
+	/** IOMMU is always on for either VA or PA mode,
+	 * so Bypass Memory Translation should be disabled.
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_SRC_FLE]);
+	 * DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
+	 */
 	fle[DPAA2_QDMA_SRC_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_SRC_FLE], len);
 
 	/* destination frame list to destination buffer */
 	DPAA2_SET_FLE_ADDR(&fle[DPAA2_QDMA_DST_FLE], dst_iova);
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	DPAA2_SET_FLE_BMT(&fle[DPAA2_QDMA_DST_FLE]);
-#endif
 	fle[DPAA2_QDMA_DST_FLE].word4.fmt = fmt;
 	DPAA2_SET_FLE_LEN(&fle[DPAA2_QDMA_DST_FLE], len);
 
@@ -591,7 +586,7 @@ dpaa2_qdma_long_fmt_dump(const struct qbman_fle *fle)
 		DPAA2_QDMA_INFO("long format/SG format, job number:%d",
 			cntx_sg->job_nb);
 		if (!cntx_sg->job_nb ||
-			cntx_sg->job_nb > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX) {
+			cntx_sg->job_nb > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX) {
 			DPAA2_QDMA_ERR("Invalid SG job number:%d",
 				cntx_sg->job_nb);
 			return;
@@ -641,9 +636,9 @@ dpaa2_qdma_copy_sg(void *dev_private,
 		return -EINVAL;
 	}
 
-	if (unlikely(nb_src > RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)) {
+	if (unlikely(nb_src > RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)) {
 		DPAA2_QDMA_ERR("SG entry number(%d) > MAX(%d)",
-			nb_src, RTE_DPAA2_QDMA_JOB_SUBMIT_MAX);
+			nb_src, RTE_DPAAX_QDMA_JOB_SUBMIT_MAX);
 		return -EINVAL;
 	}
 
@@ -662,11 +657,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			cntx_sg->cntx_idx[i] = idx_addr[i];
 	}
 
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-	cntx_iova = rte_mempool_virt2iova(cntx_sg);
-#else
-	cntx_iova = DPAA2_VADDR_TO_IOVA(cntx_sg);
-#endif
+	cntx_iova = (uint64_t)cntx_sg - qdma_vq->fle_iova2va_offset;
 
 	fle = cntx_sg->fle_sdd.fle;
 	fle_iova = cntx_iova +
@@ -698,8 +689,7 @@ dpaa2_qdma_copy_sg(void *dev_private,
 			offsetof(struct qdma_cntx_sg, sg_src_entry);
 		dst_sge_iova = cntx_iova +
 			offsetof(struct qdma_cntx_sg, sg_dst_entry);
-		len = sg_entry_populate(src, dst,
-			cntx_sg, nb_src);
+		len = sg_entry_populate(src, dst, cntx_sg, nb_src);
 
 		fle_populate(fle, sdd, sdd_iova,
 			&qdma_vq->rbp, src_sge_iova, dst_sge_iova, len,
@@ -1042,7 +1032,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 			q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+			DPAA2_VADDR_TO_IOVA(dq_storage), 1);
 		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
 			while (!qbman_check_command_complete(
 			       get_swp_active_dqs(
@@ -1077,7 +1067,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		DPAA2_VADDR_TO_IOVA(dq_storage1), 1);
 
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -1109,7 +1099,7 @@ dpaa2_qdma_dequeue(void *dev_private,
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 		ret = dpaa2_qdma_dq_fd(fd, qdma_vq, &free_space, &fle_elem_nb);
-		if (ret || free_space < RTE_DPAA2_QDMA_JOB_SUBMIT_MAX)
+		if (ret || free_space < RTE_DPAAX_QDMA_JOB_SUBMIT_MAX)
 			pending = 0;
 
 		dq_storage++;
@@ -1162,11 +1152,11 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
 		RTE_DMA_CAPA_SILENT |
 		RTE_DMA_CAPA_OPS_COPY |
 		RTE_DMA_CAPA_OPS_COPY_SG;
-	dev_info->dev_capa |= RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX;
+	dev_info->dev_capa |= RTE_DMA_CAPA_DPAAX_QDMA_FLAGS_INDEX;
 	dev_info->max_vchans = dpdmai_dev->num_queues;
 	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
 	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
-	dev_info->max_sges = RTE_DPAA2_QDMA_JOB_SUBMIT_MAX;
+	dev_info->max_sges = RTE_DPAAX_QDMA_JOB_SUBMIT_MAX;
 	dev_info->dev_name = dev->device->name;
 	if (dpdmai_dev->qdma_dev)
 		dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
@@ -1347,6 +1337,7 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 	uint32_t pool_size;
 	char pool_name[64];
 	int ret;
+	uint64_t iova, va;
 
 	DPAA2_QDMA_FUNC_TRACE();
 
@@ -1382,6 +1373,9 @@ dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
 		DPAA2_QDMA_ERR("%s create failed", pool_name);
 		return -ENOMEM;
 	}
+	iova = qdma_dev->vqs[vchan].fle_pool->mz->iova;
+	va = qdma_dev->vqs[vchan].fle_pool->mz->addr_64;
+	qdma_dev->vqs[vchan].fle_iova2va_offset = va - iova;
 
 	if (qdma_dev->is_silent) {
 		ret = rte_mempool_get_bulk(qdma_dev->vqs[vchan].fle_pool,
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index 250c83c83c..0fd1debaf8 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -220,18 +220,18 @@ struct qdma_cntx_fle_sdd {
 
 struct qdma_cntx_sg {
 	struct qdma_cntx_fle_sdd fle_sdd;
-	struct qdma_sg_entry sg_src_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	struct qdma_sg_entry sg_dst_entry[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
-	uint16_t cntx_idx[RTE_DPAA2_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_src_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	struct qdma_sg_entry sg_dst_entry[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
+	uint16_t cntx_idx[RTE_DPAAX_QDMA_JOB_SUBMIT_MAX];
 	uint16_t job_nb;
 	uint16_t rsv[3];
 } __rte_packed;
 
 #define DPAA2_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
-	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK)))
+	((void *)(uintptr_t)((flag) - ((flag) & RTE_DPAAX_QDMA_SG_IDX_ADDR_MASK)))
 
 #define DPAA2_QDMA_IDX_FROM_FLAG(flag) \
-	((flag) >> RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
+	((flag) >> RTE_DPAAX_QDMA_COPY_IDX_OFFSET)
 
 /** Represents a DPDMAI device */
 struct dpaa2_dpdmai_dev {
diff --git a/drivers/dma/dpaa2/meson.build b/drivers/dma/dpaa2/meson.build
index a99151e2a5..a523f5edb4 100644
--- a/drivers/dma/dpaa2/meson.build
+++ b/drivers/dma/dpaa2/meson.build
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2021 NXP
+# Copyright 2021, 2024 NXP
 
 if not is_linux
     build = false
@@ -14,5 +14,3 @@ sources = files('dpaa2_qdma.c')
 if cc.has_argument('-Wno-pointer-arith')
     cflags += '-Wno-pointer-arith'
 endif
-
-headers = files('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h b/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
deleted file mode 100644
index df21b39cae..0000000000
--- a/drivers/dma/dpaa2/rte_pmd_dpaa2_qdma.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021-2023 NXP
- */
-
-#ifndef _RTE_PMD_DPAA2_QDMA_H_
-#define _RTE_PMD_DPAA2_QDMA_H_
-
-#include <rte_compat.h>
-
-#define RTE_DPAA2_QDMA_COPY_IDX_OFFSET 8
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN \
-	RTE_BIT64(RTE_DPAA2_QDMA_COPY_IDX_OFFSET)
-#define RTE_DPAA2_QDMA_SG_IDX_ADDR_MASK \
-	(RTE_DPAA2_QDMA_SG_IDX_ADDR_ALIGN - 1)
-#define RTE_DPAA2_QDMA_SG_SUBMIT(idx_addr, flag) \
-	(((uint64_t)idx_addr) | (flag))
-
-#define RTE_DPAA2_QDMA_COPY_SUBMIT(idx, flag) \
-	((idx << RTE_DPAA2_QDMA_COPY_IDX_OFFSET) | (flag))
-
-#define RTE_DPAA2_QDMA_JOB_SUBMIT_MAX (32 + 8)
-#define RTE_DMA_CAPA_DPAA2_QDMA_FLAGS_INDEX RTE_BIT64(63)
-#endif /* _RTE_PMD_DPAA2_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 08/15] dma/dpaa: refactor driver
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (6 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
                                     ` (6 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch refactors the DPAA DMA driver code with the following changes:
 - rename the HW descriptors and document them in more detail.
 - update the qdma engine and queue structures.
 - use rte_ring APIs for enqueue and dequeue (see the sketch below).
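
A rough illustration of the ring-based bookkeeping (names and sizes
are illustrative, not the exact driver symbols): each submitted burst
records its descriptor count in a completion ring, and one HW status
entry later retires the whole burst.

#include <errno.h>
#include <stdint.h>
#include <rte_ring.h>

static struct rte_ring *burst_ring;

static int
setup_burst_ring(void)
{
	/* Size and socket id chosen for illustration only. */
	burst_ring = rte_ring_create("cmplt_burst", 128, 0,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	return burst_ring ? 0 : -ENOMEM;
}

static int
record_burst(uint16_t *persistent_count, uint16_t num)
{
	/* In the driver the pointer refers to a persistent per-queue
	 * array entry, never to a stack variable.
	 */
	*persistent_count = num;
	return rte_ring_enqueue(burst_ring, persistent_count);
}

static uint16_t
retire_burst(void)
{
	uint16_t *done;

	/* One status-queue entry retires one whole burst. */
	if (rte_ring_dequeue(burst_ring, (void **)&done) == 0)
		return *done;

	return 0;
}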

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 1334 +++++++++++++++++++---------------
 drivers/dma/dpaa/dpaa_qdma.h |  222 +++---
 2 files changed, 868 insertions(+), 688 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 3d4fd818f8..d162d84c45 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #include <bus_dpaa_driver.h>
@@ -8,69 +8,71 @@
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
+static uint32_t s_sg_max_entry_sz = 2000;
+
 static inline void
-qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
 {
 	ccdf->addr_hi = upper_32_bits(addr);
 	ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
-static inline u64
-qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
+static inline void
+qdma_desc_sge_addr_set64(struct fsl_qdma_comp_sg_desc *sge, u64 addr)
 {
-	return ccdf->cfg8b_w1 & 0xff;
+	sge->addr_hi = upper_32_bits(addr);
+	sge->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
 }
 
 static inline int
-qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
+qdma_ccdf_get_queue(struct fsl_qdma_comp_cmd_desc *ccdf,
+	uint8_t *queue_idx)
 {
-	return (rte_le_to_cpu_32(ccdf->cfg) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_OFFSET;
-}
+	uint64_t addr = ((uint64_t)ccdf->addr_hi) << 32 | ccdf->addr_lo;
+
+	if (addr && queue_idx)
+		*queue_idx = ccdf->queue;
+	if (addr) {
+		ccdf->addr_hi = 0;
+		ccdf->addr_lo = 0;
+		return true;
+	}
 
-static inline void
-qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
-{
-	ccdf->cfg = rte_cpu_to_le_32(QDMA_CCDF_FOTMAT | offset);
+	return false;
 }
 
 static inline int
-qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
+ilog2(int x)
 {
-	return (rte_le_to_cpu_32(ccdf->status) & QDMA_CCDF_MASK)
-		>> QDMA_CCDF_STATUS;
-}
+	int log = 0;
 
-static inline void
-qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
-{
-	ccdf->status = rte_cpu_to_le_32(QDMA_CCDF_SER | status);
+	x >>= 1;
+
+	while (x) {
+		log++;
+		x >>= 1;
+	}
+	return log;
 }
 
-static inline void
-qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qsize(uint32_t q_size)
 {
-	csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
+	return (ilog2(q_size) - ilog2(64));
 }
 
-static inline void
-qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
+static inline int
+ilog2_qthld(uint32_t q_thld)
 {
-	csgf->cfg = rte_cpu_to_le_32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
+	return (ilog2(q_thld) - ilog2(16));
 }
 
 static inline int
-ilog2(int x)
+fsl_qdma_queue_bd_in_hw(struct fsl_qdma_queue *fsl_queue)
 {
-	int log = 0;
-
-	x >>= 1;
+	struct rte_dma_stats *stats = &fsl_queue->stats;
 
-	while (x) {
-		log++;
-		x >>= 1;
-	}
-	return log;
+	return (stats->submitted - stats->completed);
 }
 
 static u32
@@ -97,12 +99,12 @@ qdma_writel_be(u32 val, void *addr)
 	QDMA_OUT_BE(addr, val);
 }
 
-static void
-*dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+static void *
+dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 {
 	void *virt_addr;
 
-	virt_addr = rte_malloc("dma pool alloc", size, aligned);
+	virt_addr = rte_zmalloc(nm, size, aligned);
 	if (!virt_addr)
 		return NULL;
 
@@ -111,268 +113,221 @@ static void
 	return virt_addr;
 }
 
-static void
-dma_pool_free(void *addr)
-{
-	rte_free(addr);
-}
-
-static void
-fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
-{
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
-	int id;
-
-	if (--fsl_queue->count)
-		goto finally;
-
-	id = (fsl_qdma->block_base - fsl_queue->block_base) /
-	      fsl_qdma->block_offset;
-
-	while (rte_atomic32_read(&wait_task[id]) == 1)
-		rte_delay_us(QDMA_DELAY);
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_used,	list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &fsl_queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		dma_pool_free(comp_temp->virt_addr);
-		dma_pool_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
-
-finally:
-	fsl_qdma->desc_allocated--;
-}
-
-static void
-fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-				      dma_addr_t dst, dma_addr_t src, u32 len)
-{
-	struct fsl_qdma_format *csgf_src, *csgf_dest;
-
-	/* Note: command table (fsl_comp->virt_addr) is getting filled
-	 * directly in cmd descriptors of queues while enqueuing the descriptor
-	 * please refer fsl_qdma_enqueue_desc
-	 * frame list table (virt_addr) + 1) and source,
-	 * destination descriptor table
-	 * (fsl_comp->desc_virt_addr and fsl_comp->desc_virt_addr+1) move to
-	 * the control path to fsl_qdma_pre_request_enqueue_comp_sd_desc
-	 */
-	csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
-	csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
-
-	/* Status notification is enqueued to status queue. */
-	qdma_desc_addr_set64(csgf_src, src);
-	qdma_csgf_set_len(csgf_src, len);
-	qdma_desc_addr_set64(csgf_dest, dst);
-	qdma_csgf_set_len(csgf_dest, len);
-	/* This entry is the last entry. */
-	qdma_csgf_set_f(csgf_dest, len);
-}
-
 /*
  * Pre-request command descriptor and compound S/G for enqueue.
  */
 static int
-fsl_qdma_pre_request_enqueue_comp_sd_desc(
-					struct fsl_qdma_queue *queue,
-					int size, int aligned)
+fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
-	struct fsl_qdma_comp *comp_temp, *_comp_temp;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
-	struct fsl_qdma_format *csgf_desc;
-	int i;
-
-	for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLOW); i++) {
-		comp_temp = rte_zmalloc("qdma: comp temp",
-					sizeof(*comp_temp), 0);
-		if (!comp_temp)
-			return -ENOMEM;
-
-		comp_temp->virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
-		if (!comp_temp->virt_addr) {
-			rte_free(comp_temp);
+	struct fsl_qdma_comp_cmd_desc *ccdf;
+	uint16_t i, j;
+	struct fsl_qdma_cmpd_ft *ft;
+
+	for (i = 0; i < queue->n_cq; i++) {
+		dma_addr_t phy_ft = 0;
+
+		queue->ft[i] = dma_pool_alloc(NULL,
+			sizeof(struct fsl_qdma_cmpd_ft),
+			RTE_CACHE_LINE_SIZE, &phy_ft);
+		if (!queue->ft[i])
+			goto fail;
+		if (((uint64_t)queue->ft[i]) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] addr(%p) not cache aligned",
+				i, queue->ft[i]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		comp_temp->desc_virt_addr =
-		dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
-		if (!comp_temp->desc_virt_addr) {
-			rte_free(comp_temp->virt_addr);
-			rte_free(comp_temp);
+		if (((uint64_t)(&queue->ft[i]->desc_ssge[0])) &
+			(RTE_CACHE_LINE_SIZE - 1)) {
+			DPAA_QDMA_ERR("FD[%d] SGE addr(%p) not cache aligned",
+				i, &queue->ft[i]->desc_ssge[0]);
+			rte_free(queue->ft[i]);
+			queue->ft[i] = NULL;
 			goto fail;
 		}
-
-		memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
-		memset(comp_temp->desc_virt_addr, 0,
-		       FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
-
-		csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
-		sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
-		ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+		queue->ft[i]->phy_ssge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_ssge);
+		queue->ft[i]->phy_dsge = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, desc_dsge);
+		queue->ft[i]->phy_df = phy_ft +
+			offsetof(struct fsl_qdma_cmpd_ft, df);
+
+		ft = queue->ft[i];
+		sdf = &ft->df.sdf;
+		ddf = &ft->df.ddf;
 		/* Compound Command Descriptor(Frame List Table) */
-		qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+		qdma_desc_sge_addr_set64(&ft->desc_buf, ft->phy_df);
 		/* It must be 32 as Compound S/G Descriptor */
-		qdma_csgf_set_len(csgf_desc, 32);
+		ft->desc_buf.length = sizeof(struct fsl_qdma_df);
+
 		/* Descriptor Buffer */
-		sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
-			       FSL_QDMA_CMD_RWTTYPE_OFFSET);
-		ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
-				FSL_QDMA_CMD_LWC_OFFSET);
-
-		list_add_tail(&comp_temp->list, &queue->comp_free);
+		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+
+		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
+		ddf->lwc = FSL_QDMA_CMD_LWC;
+
+		ccdf = &queue->cq[i];
+		qdma_desc_addr_set64(ccdf, phy_ft);
+		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
+
+		ccdf->queue = queue->queue_id;
 	}
+	queue->ci = 0;
 
 	return 0;
 
 fail:
-	list_for_each_entry_safe(comp_temp, _comp_temp,
-				 &queue->comp_free, list) {
-		list_del(&comp_temp->list);
-		rte_free(comp_temp->virt_addr);
-		rte_free(comp_temp->desc_virt_addr);
-		rte_free(comp_temp);
-	}
+	for (j = 0; j < i; j++)
+		rte_free(queue->ft[j]);
 
 	return -ENOMEM;
 }
 
-/*
- * Request a command descriptor for enqueue.
- */
-static struct fsl_qdma_comp *
-fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
+static int
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma,
+	int queue_id, int block_id)
 {
-	struct fsl_qdma_queue *queue = fsl_chan->queue;
-	struct fsl_qdma_comp *comp_temp;
-
-	if (!list_empty(&queue->comp_free)) {
-		comp_temp = list_first_entry(&queue->comp_free,
-					     struct fsl_qdma_comp,
-					     list);
-		list_del(&comp_temp->list);
-		return comp_temp;
+	struct fsl_qdma_queue *cmd_queue;
+	uint32_t queue_size;
+	char nm[RTE_MEMZONE_NAMESIZE];
+
+	cmd_queue = &fsl_qdma->cmd_queues[block_id][queue_id];
+	cmd_queue->engine = fsl_qdma;
+
+	queue_size = sizeof(struct fsl_qdma_comp_cmd_desc) *
+		QDMA_QUEUE_SIZE;
+
+	sprintf(nm, "Command queue_%d_%d",
+		block_id, queue_id);
+	cmd_queue->cq = dma_pool_alloc(nm, queue_size,
+		queue_size, &cmd_queue->bus_addr);
+	if (!cmd_queue->cq) {
+		DPAA_QDMA_ERR("%s alloc failed!", nm);
+		return -ENOMEM;
 	}
 
-	return NULL;
-}
-
-static struct fsl_qdma_queue
-*fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
-{
-	struct fsl_qdma_queue *queue_head, *queue_temp;
-	int len, i, j;
-	int queue_num;
-	int blocks;
-	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-
-	queue_num = fsl_qdma->n_queues;
-	blocks = fsl_qdma->num_blocks;
-
-	len = sizeof(*queue_head) * queue_num * blocks;
-	queue_head = rte_zmalloc("qdma: queue head", len, 0);
-	if (!queue_head)
-		return NULL;
+	cmd_queue->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
+	cmd_queue->n_cq = QDMA_QUEUE_SIZE;
+	cmd_queue->queue_id = queue_id;
+	cmd_queue->block_id = block_id;
+	cmd_queue->pending_start = 0;
+	cmd_queue->pending_num = 0;
+	cmd_queue->complete_start = 0;
+
+	sprintf(nm, "Compound Table_%d_%d",
+		block_id, queue_id);
+	cmd_queue->ft = rte_zmalloc(nm,
+			sizeof(void *) * QDMA_QUEUE_SIZE, 0);
+	if (!cmd_queue->ft) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "Pending_desc_%d_%d",
+		block_id, queue_id);
+	cmd_queue->pending_desc = rte_zmalloc(nm,
+		sizeof(struct fsl_qdma_desc) * FSL_QDMA_MAX_DESC_NUM, 0);
+	if (!cmd_queue->pending_desc) {
+		DPAA_QDMA_ERR("%s zmalloc failed!", nm);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-burst_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_burst = rte_ring_create(nm,
+		QDMA_QUEUE_SIZE * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_burst) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_desc = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_desc) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
+	sprintf(nm, "complete-pool-desc_ring_%d_%d",
+		block_id, queue_id);
+	cmd_queue->complete_pool = rte_ring_create(nm,
+		FSL_QDMA_MAX_DESC_NUM * 2, 0,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (!cmd_queue->complete_pool) {
+		DPAA_QDMA_ERR("%s create failed!", nm);
+		rte_ring_free(cmd_queue->complete_desc);
+		rte_ring_free(cmd_queue->complete_burst);
+		rte_free(cmd_queue->pending_desc);
+		rte_free(cmd_queue->ft);
+		rte_free(cmd_queue->cq);
+		return -ENOMEM;
+	}
 
-	for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
-		queue_size[i] = QDMA_QUEUE_SIZE;
+	memset(&cmd_queue->stats, 0, sizeof(struct rte_dma_stats));
+	cmd_queue->pending_max = FSL_QDMA_MAX_DESC_NUM;
 
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-				DPAA_QDMA_ERR("Get wrong queue-sizes.");
-				goto fail;
-			}
-			queue_temp = queue_head + i + (j * queue_num);
-
-			queue_temp->cq =
-			dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-				       queue_size[i],
-				       sizeof(struct fsl_qdma_format) *
-				       queue_size[i], &queue_temp->bus_addr);
-
-			if (!queue_temp->cq)
-				goto fail;
-
-			memset(queue_temp->cq, 0x0, queue_size[i] *
-			       sizeof(struct fsl_qdma_format));
-
-			queue_temp->block_base = fsl_qdma->block_base +
-				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-			queue_temp->n_cq = queue_size[i];
-			queue_temp->id = i;
-			queue_temp->count = 0;
-			queue_temp->pending = 0;
-			queue_temp->virt_head = queue_temp->cq;
-			queue_temp->stats = (struct rte_dma_stats){0};
-		}
-	}
-	return queue_head;
+	return 0;
+}
 
-fail:
-	for (j = 0; j < blocks; j++) {
-		for (i = 0; i < queue_num; i++) {
-			queue_temp = queue_head + i + (j * queue_num);
-			dma_pool_free(queue_temp->cq);
-		}
-	}
-	rte_free(queue_head);
+static void
+fsl_qdma_free_cmdq_res(struct fsl_qdma_queue *queue)
+{
+	rte_free(queue->ft);
+	rte_free(queue->cq);
+	rte_free(queue->pending_desc);
+	rte_ring_free(queue->complete_burst);
+	rte_ring_free(queue->complete_desc);
+	rte_ring_free(queue->complete_pool);
+}
 
-	return NULL;
+static void
+fsl_qdma_free_stq_res(struct fsl_qdma_status_queue *queue)
+{
+	rte_free(queue->cq);
 }
 
-static struct
-fsl_qdma_queue *fsl_qdma_prep_status_queue(void)
+static int
+fsl_qdma_prep_status_queue(struct fsl_qdma_engine *fsl_qdma,
+	uint32_t block_id)
 {
-	struct fsl_qdma_queue *status_head;
-	unsigned int status_size;
+	struct fsl_qdma_status_queue *status;
+	uint32_t status_size;
 
-	status_size = QDMA_STATUS_SIZE;
-	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
-	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-		DPAA_QDMA_ERR("Get wrong status_size.");
-		return NULL;
-	}
+	status = &fsl_qdma->stat_queues[block_id];
+	status->engine = fsl_qdma;
 
-	status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
-	if (!status_head)
-		return NULL;
+	status_size = QDMA_STATUS_SIZE *
+		sizeof(struct fsl_qdma_comp_cmd_desc);
 
-	/*
-	 * Buffer for queue command
-	 */
-	status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 sizeof(struct fsl_qdma_format) *
-					 status_size,
-					 &status_head->bus_addr);
-
-	if (!status_head->cq) {
-		rte_free(status_head);
-		return NULL;
-	}
+	status->cq = dma_pool_alloc(NULL, status_size,
+		status_size, &status->bus_addr);
+
+	if (!status->cq)
+		return -ENOMEM;
 
-	memset(status_head->cq, 0x0, status_size *
-	       sizeof(struct fsl_qdma_format));
-	status_head->n_cq = status_size;
-	status_head->virt_head = status_head->cq;
+	memset(status->cq, 0x0, status_size);
+	status->n_cq = QDMA_STATUS_SIZE;
+	status->complete = 0;
+	status->block_id = block_id;
+	status->block_vir = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	return status_head;
+	return 0;
 }
 
 static int
@@ -420,59 +375,41 @@ fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static int
-fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
-				 void *block, int id, const uint16_t nb_cpls,
-				 uint16_t *last_idx,
-				 enum rte_dma_status_code *status)
+static void
+fsl_qdma_data_validation(struct fsl_qdma_desc *desc[],
+	uint8_t num, struct fsl_qdma_queue *fsl_queue)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
-	struct fsl_qdma_queue *temp_queue;
-	struct fsl_qdma_format *status_addr;
-	struct fsl_qdma_comp *fsl_comp = NULL;
-	u32 reg, i;
-	int count = 0;
-
-	while (count < nb_cpls) {
-		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
-		if (reg & FSL_QDMA_BSQSR_QE_BE)
-			return count;
-
-		status_addr = fsl_status->virt_head;
-
-		i = qdma_ccdf_get_queue(status_addr) +
-			id * fsl_qdma->n_queues;
-		temp_queue = fsl_queue + i;
-		fsl_comp = list_first_entry(&temp_queue->comp_used,
-					    struct fsl_qdma_comp,
-					    list);
-		list_del(&fsl_comp->list);
-
-		reg = qdma_readl_be(block + FSL_QDMA_BSQMR);
-		reg |= FSL_QDMA_BSQMR_DI_BE;
-
-		qdma_desc_addr_set64(status_addr, 0x0);
-		fsl_status->virt_head++;
-		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-			fsl_status->virt_head = fsl_status->cq;
-		qdma_writel_be(reg, block + FSL_QDMA_BSQMR);
-		*last_idx = fsl_comp->index;
-		if (status != NULL)
-			status[count] = RTE_DMA_STATUS_SUCCESSFUL;
-
-		list_add_tail(&fsl_comp->list, &temp_queue->comp_free);
-		count++;
-
+	uint32_t i, j;
+	uint8_t *v_src, *v_dst;
+	char err_msg[512];
+	int offset;
+
+
+	offset = sprintf(err_msg, "Fatal TC%d/queue%d: ",
+		fsl_queue->block_id,
+		fsl_queue->queue_id);
+	for (i = 0; i < num; i++) {
+		v_src = rte_mem_iova2virt(desc[i]->src);
+		v_dst = rte_mem_iova2virt(desc[i]->dst);
+		for (j = 0; j < desc[i]->len; j++) {
+			if (v_src[j] != v_dst[j]) {
+				sprintf(&err_msg[offset],
+					"job[%"PRIu64"]:src(%p)[%d](%d)!=dst(%p)[%d](%d)",
+					desc[i]->flag, v_src, j, v_src[j],
+					v_dst, j, v_dst[j]);
+				DPAA_QDMA_ERR("%s, stop validating!",
+					err_msg);
+				return;
+			}
+		}
 	}
-	return count;
 }
 
 static int
 fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
 	struct fsl_qdma_queue *temp;
+	struct fsl_qdma_status_queue *temp_stat;
 	void *ctrl = fsl_qdma->ctrl_base;
 	void *block;
 	u32 i, j;
@@ -489,8 +426,8 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	for (j = 0; j < fsl_qdma->num_blocks; j++) {
 		block = fsl_qdma->block_base +
 			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
-		for (i = 0; i < fsl_qdma->n_queues; i++) {
-			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+		for (i = 0; i < QDMA_QUEUES; i++) {
+			temp = &fsl_qdma->cmd_queues[j][i];
 			/*
 			 * Initialize Command Queue registers to
 			 * point to the first
@@ -531,18 +468,15 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 		 * Enqueue Pointer Address Registers
 		 */
 
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEEPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEPAR);
-		qdma_writel(
-			    upper_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQEDPAR);
-		qdma_writel(
-			    lower_32_bits(fsl_qdma->status[j]->bus_addr),
-			    block + FSL_QDMA_SQDPAR);
+		temp_stat = &fsl_qdma->stat_queues[j];
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEEPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEPAR);
+		qdma_writel(upper_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQEDPAR);
+		qdma_writel(lower_32_bits(temp_stat->bus_addr),
+			block + FSL_QDMA_SQDPAR);
 		/* Desiable status queue interrupt. */
 
 		qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
@@ -551,7 +485,7 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 		/* Initialize the status queue mode. */
 		reg = FSL_QDMA_BSQMR_EN;
-		val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+		val = ilog2_qsize(temp_stat->n_cq);
 		reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
 		qdma_writel(reg, block + FSL_QDMA_BSQMR);
 	}
@@ -563,158 +497,393 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 	return 0;
 }
 
-static void *
-fsl_qdma_prep_memcpy(void *fsl_chan, dma_addr_t dst,
-			   dma_addr_t src, size_t len,
-			   void *call_back,
-			   void *param)
+static uint16_t
+dpaa_qdma_block_dequeue(struct fsl_qdma_engine *fsl_qdma,
+	uint8_t block_id)
 {
-	struct fsl_qdma_comp *fsl_comp;
+	struct fsl_qdma_status_queue *stat_queue;
+	struct fsl_qdma_queue *cmd_queue;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t start, count = 0;
+	uint8_t qid = 0;
+	uint32_t reg;
+	int ret;
+	uint8_t *block;
+	uint16_t *dq_complete;
+	struct fsl_qdma_desc *desc[FSL_QDMA_SG_MAX_ENTRY];
 
-	fsl_comp =
-	fsl_qdma_request_enqueue_desc((struct fsl_qdma_chan *)fsl_chan);
-	if (!fsl_comp)
-		return NULL;
+	stat_queue = &fsl_qdma->stat_queues[block_id];
+	cq = stat_queue->cq;
+	start = stat_queue->complete;
+
+	block = fsl_qdma->block_base +
+		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, block_id);
 
-	fsl_comp->qchan = fsl_chan;
-	fsl_comp->call_back_func = call_back;
-	fsl_comp->params = param;
+	do {
+		reg = qdma_readl_be(block + FSL_QDMA_BSQSR);
+		if (reg & FSL_QDMA_BSQSR_QE_BE)
+			break;
 
-	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-	return (void *)fsl_comp;
+		qdma_writel_be(FSL_QDMA_BSQMR_DI, block + FSL_QDMA_BSQMR);
+		ret = qdma_ccdf_get_queue(&cq[start], &qid);
+		if (ret == true) {
+			cmd_queue = &fsl_qdma->cmd_queues[block_id][qid];
+
+			ret = rte_ring_dequeue(cmd_queue->complete_burst,
+				(void **)&dq_complete);
+			if (ret) {
+				DPAA_QDMA_ERR("DQ desc number failed!");
+				break;
+			}
+
+			ret = rte_ring_dequeue_bulk(cmd_queue->complete_desc,
+				(void **)desc, *dq_complete, NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("DQ %d descs failed!(%d)",
+					*dq_complete, ret);
+				break;
+			}
+
+			fsl_qdma_data_validation(desc, *dq_complete, cmd_queue);
+
+			ret = rte_ring_enqueue_bulk(cmd_queue->complete_pool,
+				(void **)desc, (*dq_complete), NULL);
+			if (ret != (*dq_complete)) {
+				DPAA_QDMA_ERR("Failed desc eq %d!=%d to %s",
+					ret, *dq_complete,
+					cmd_queue->complete_pool->name);
+				break;
+			}
+
+			cmd_queue->complete_start =
+				(cmd_queue->complete_start + (*dq_complete)) &
+				(cmd_queue->pending_max - 1);
+			cmd_queue->stats.completed++;
+
+			start++;
+			if (unlikely(start == stat_queue->n_cq))
+				start = 0;
+			count++;
+		} else {
+			DPAA_QDMA_ERR("Block%d not empty but dq-queue failed!",
+				block_id);
+			break;
+		}
+	} while (1);
+	stat_queue->complete = start;
+
+	return count;
 }
 
 static int
-fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan,
-				  struct fsl_qdma_comp *fsl_comp,
-				  uint64_t flags)
+fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
+	uint16_t num)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	struct fsl_qdma_format *ccdf;
-	u32 reg;
+	uint16_t i, idx, start, dq;
+	int ret, dq_cnt;
+
 
-	/* retrieve and store the register value in big endian
-	 * to avoid bits swap
-	 */
-	reg = qdma_readl_be(block +
-			 FSL_QDMA_BCQSR(fsl_queue->id));
-	if (reg & (FSL_QDMA_BCQSR_QF_XOFF_BE))
-		return -1;
-
-	/* filling descriptor  command table */
-	ccdf = (struct fsl_qdma_format *)fsl_queue->virt_head;
-	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(fsl_comp->virt_addr));
-	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(fsl_comp->virt_addr));
-	fsl_comp->index = fsl_queue->virt_head - fsl_queue->cq;
-	fsl_queue->virt_head++;
-
-	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-		fsl_queue->virt_head = fsl_queue->cq;
-
-	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-
-	if (flags == RTE_DMA_OP_FLAG_SUBMIT) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->stats.submitted++;
-	} else {
-		fsl_queue->pending++;
+	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
+eq_again:
+	ret = rte_ring_enqueue(fsl_queue->complete_burst,
+			&fsl_queue->desc_in_hw[fsl_queue->ci]);
+	if (ret) {
+		DPAA_QDMA_DP_DEBUG("%s: Queue is full, try dequeue first",
+			__func__);
+		DPAA_QDMA_DP_DEBUG("%s: submitted:%"PRIu64", completed:%"PRIu64"",
+			__func__, fsl_queue->stats.submitted,
+			fsl_queue->stats.completed);
+		dq_cnt = 0;
+dq_again:
+		dq = dpaa_qdma_block_dequeue(fsl_queue->engine,
+			fsl_queue->block_id);
+		dq_cnt++;
+		if (dq > 0) {
+			goto eq_again;
+		} else {
+			if (dq_cnt < 100)
+				goto dq_again;
+			DPAA_QDMA_ERR("%s: Dq block%d failed!",
+				__func__, fsl_queue->block_id);
+		}
+		return ret;
+	}
+	start = fsl_queue->pending_start;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		ret = rte_ring_enqueue(fsl_queue->complete_desc,
+				&fsl_queue->pending_desc[idx]);
+		if (ret) {
+			DPAA_QDMA_ERR("Descriptors eq failed!");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
+{
+	int overflow = 0;
+	uint16_t blk_drain, check_num, drain_num;
+	const struct rte_dma_stats *st = &fsl_queue->stats;
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
+
+	check_num = 0;
+overflow_check:
+	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+			QDMA_QUEUE_CR_WM) ? 1 : 0;
+
+	if (likely(!overflow))
+		return 0;
+
+	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
+		fsl_queue->block_id, fsl_queue->queue_id,
+		st->submitted, st->completed, QDMA_QUEUE_CR_WM);
+	drain_num = 0;
+
+drain_again:
+	blk_drain = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	if (!blk_drain) {
+		drain_num++;
+		if (drain_num >= 10000) {
+			DPAA_QDMA_WARN("TC%d failed drain, Q%d's %"PRIu64" bd in HW.",
+				fsl_queue->block_id, fsl_queue->queue_id,
+				st->submitted - st->completed);
+			drain_num = 0;
+		}
+		goto drain_again;
+	}
+	check_num++;
+	if (check_num >= 1000) {
+		DPAA_QDMA_WARN("TC%d failed check, Q%d's %"PRIu64" bd in HW.",
+			fsl_queue->block_id, fsl_queue->queue_id,
+			st->submitted - st->completed);
+		check_num = 0;
 	}
-	return fsl_comp->index;
+	goto overflow_check;
+
+	return 0;
 }
 
 static int
-fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
+fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
+	dma_addr_t dst, dma_addr_t src, size_t len)
 {
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
 
-	if (fsl_queue->count++)
-		goto finally;
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
 
-	INIT_LIST_HEAD(&fsl_queue->comp_free);
-	INIT_LIST_HEAD(&fsl_queue->comp_used);
+	ft = fsl_queue->ft[fsl_queue->ci];
 
-	ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
-				FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
-	if (ret) {
-		DPAA_QDMA_ERR(
-			"failed to alloc dma buffer for comp descriptor");
-		goto exit;
-	}
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+	qdma_desc_sge_addr_set64(csgf_src, src);
+	csgf_src->length = len;
+	csgf_src->extion = 0;
+	qdma_desc_sge_addr_set64(csgf_dest, dst);
+	csgf_dest->length = len;
+	csgf_dest->extion = 0;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, 1);
+	if (ret)
+		return ret;
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
 
-finally:
-	return fsl_qdma->desc_allocated++;
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
 
-exit:
-	return -ENOMEM;
+	return 0;
 }
 
 static int
-dpaa_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
-	      uint32_t info_sz)
+fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 {
-#define DPAADMA_MAX_DESC        64
-#define DPAADMA_MIN_DESC        64
+	uint8_t *block = fsl_queue->block_vir;
+	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
+	struct fsl_qdma_cmpd_ft *ft;
+	uint32_t total_len;
+	uint16_t start, idx, num, i, next_idx;
+	int ret;
 
-	RTE_SET_USED(dev);
-	RTE_SET_USED(info_sz);
+eq_sg:
+	total_len = 0;
+	start = fsl_queue->pending_start;
+	if (fsl_queue->pending_desc[start].len > s_sg_max_entry_sz ||
+		fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num--;
+		}
+		if (fsl_queue->pending_num > 0)
+			goto eq_sg;
 
-	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
-			     RTE_DMA_CAPA_MEM_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_DEV |
-			     RTE_DMA_CAPA_DEV_TO_MEM |
-			     RTE_DMA_CAPA_SILENT |
-			     RTE_DMA_CAPA_OPS_COPY;
-	dev_info->max_vchans = 1;
-	dev_info->max_desc = DPAADMA_MAX_DESC;
-	dev_info->min_desc = DPAADMA_MIN_DESC;
+		return ret;
+	}
+
+	ret = fsl_qdma_enqueue_overflow(fsl_queue);
+	if (unlikely(ret))
+		return ret;
+
+	if (fsl_queue->pending_num > FSL_QDMA_SG_MAX_ENTRY)
+		num = FSL_QDMA_SG_MAX_ENTRY;
+	else
+		num = fsl_queue->pending_num;
+
+	ft = fsl_queue->ft[fsl_queue->ci];
+	csgf_src = &ft->desc_sbuf;
+	csgf_dest = &ft->desc_dbuf;
+
+	qdma_desc_sge_addr_set64(csgf_src, ft->phy_ssge);
+	csgf_src->extion = 1;
+	qdma_desc_sge_addr_set64(csgf_dest, ft->phy_dsge);
+	csgf_dest->extion = 1;
+	/* This entry is the last entry. */
+	csgf_dest->final = 1;
+	for (i = 0; i < num; i++) {
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		qdma_desc_sge_addr_set64(&ft->desc_ssge[i],
+			fsl_queue->pending_desc[idx].src);
+		ft->desc_ssge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_ssge[i].final = 0;
+		qdma_desc_sge_addr_set64(&ft->desc_dsge[i],
+			fsl_queue->pending_desc[idx].dst);
+		ft->desc_dsge[i].length = fsl_queue->pending_desc[idx].len;
+		ft->desc_dsge[i].final = 0;
+		total_len += fsl_queue->pending_desc[idx].len;
+		if ((i + 1) != num) {
+			next_idx = (idx + 1) & (fsl_queue->pending_max - 1);
+			if (fsl_queue->pending_desc[next_idx].len >
+				s_sg_max_entry_sz) {
+				num = i + 1;
+				break;
+			}
+		}
+	}
+
+	ft->desc_ssge[num - 1].final = 1;
+	ft->desc_dsge[num - 1].final = 1;
+	csgf_src->length = total_len;
+	csgf_dest->length = total_len;
+	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
+	if (ret)
+		return ret;
+
+	fsl_queue->ci = (fsl_queue->ci + 1) & (fsl_queue->n_cq - 1);
+
+	qdma_writel(fsl_queue->le_cqmr | FSL_QDMA_BCQMR_EI,
+		block + FSL_QDMA_BCQMR(fsl_queue->queue_id));
+	fsl_queue->stats.submitted++;
+
+	fsl_queue->pending_start =
+		(start + num) & (fsl_queue->pending_max - 1);
+	fsl_queue->pending_num -= num;
+	if (fsl_queue->pending_num > 0)
+		goto eq_sg;
 
 	return 0;
 }
 
 static int
-dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,  uint16_t vchan)
+fsl_qdma_enqueue_desc(struct fsl_qdma_queue *fsl_queue)
 {
-	u32 i, start, end;
+	uint16_t start = fsl_queue->pending_start;
 	int ret;
 
-	start = fsl_qdma->free_block_id * QDMA_QUEUES;
-	fsl_qdma->free_block_id++;
+	if (fsl_queue->pending_num == 1) {
+		ret = fsl_qdma_enqueue_desc_single(fsl_queue,
+			fsl_queue->pending_desc[start].dst,
+			fsl_queue->pending_desc[start].src,
+			fsl_queue->pending_desc[start].len);
+		if (!ret) {
+			fsl_queue->pending_start =
+				(start + 1) & (fsl_queue->pending_max - 1);
+			fsl_queue->pending_num = 0;
+		}
+		return ret;
+	}
+
+	return fsl_qdma_enqueue_desc_sg(fsl_queue);
+}
 
-	end = start + 1;
-	for (i = start; i < end; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+static int
+dpaa_qdma_info_get(const struct rte_dma_dev *dev,
+	struct rte_dma_info *dev_info, __rte_unused uint32_t info_sz)
+{
+	struct fsl_qdma_engine *fsl_qdma = dev->data->dev_private;
 
-		if (fsl_chan->free) {
-			fsl_chan->free = false;
-			ret = fsl_qdma_alloc_chan_resources(fsl_chan);
-			if (ret)
-				return ret;
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+		RTE_DMA_CAPA_SILENT | RTE_DMA_CAPA_OPS_COPY |
+		RTE_DMA_CAPA_OPS_COPY_SG;
+	dev_info->dev_capa |= DPAA_QDMA_FLAGS_INDEX;
+	dev_info->max_vchans = fsl_qdma->n_queues;
+	dev_info->max_desc = FSL_QDMA_MAX_DESC_NUM;
+	dev_info->min_desc = QDMA_QUEUE_SIZE;
+	dev_info->max_sges = FSL_QDMA_SG_MAX_ENTRY;
 
-			fsl_qdma->vchan_map[vchan] = i;
-			return 0;
+	return 0;
+}
+
+static int
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
+	uint16_t vchan)
+{
+	int ret, i, j, found = 0;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	if (fsl_queue) {
+		found = 1;
+		goto queue_found;
+	}
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++) {
+			fsl_queue = &fsl_qdma->cmd_queues[i][j];
+
+			if (fsl_queue->channel_id == vchan) {
+				found = 1;
+				fsl_qdma->chan[vchan] = fsl_queue;
+				goto queue_found;
+			}
 		}
 	}
 
-	return -1;
-}
+queue_found:
+	if (!found)
+		return -ENXIO;
 
-static void
-dma_release(void *fsl_chan)
-{
-	((struct fsl_qdma_chan *)fsl_chan)->free = true;
-	fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+	if (fsl_queue->used)
+		return 0;
+
+	ret = fsl_qdma_pre_comp_sd_desc(fsl_queue);
+	if (ret)
+		return ret;
+
+	fsl_queue->used = 1;
+	fsl_qdma->block_queues[fsl_queue->block_id]++;
+
+	return 0;
 }
 
 static int
 dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-		    __rte_unused const struct rte_dma_conf *dev_conf,
-		    __rte_unused uint32_t conf_sz)
+	__rte_unused const struct rte_dma_conf *dev_conf,
+	__rte_unused uint32_t conf_sz)
 {
 	return 0;
 }
@@ -745,148 +914,112 @@ dpaa_qdma_queue_setup(struct rte_dma_dev *dmadev,
 static int
 dpaa_qdma_submit(void *dev_private, uint16_t vchan)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-	void *block = fsl_queue->block_base;
-	u32 reg;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	while (fsl_queue->pending) {
-		reg = qdma_readl_be(block + FSL_QDMA_BCQMR(fsl_queue->id));
-		reg |= FSL_QDMA_BCQMR_EI_BE;
-		qdma_writel_be(reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-		fsl_queue->pending--;
-		fsl_queue->stats.submitted++;
-	}
+	if (!fsl_queue->pending_num)
+		return 0;
 
-	return 0;
+	return fsl_qdma_enqueue_desc(fsl_queue);
 }
 
 static int
 dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
-		  rte_iova_t src, rte_iova_t dst,
-		  uint32_t length, uint64_t flags)
+	rte_iova_t src, rte_iova_t dst,
+	uint32_t length, uint64_t flags)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	uint16_t start = fsl_queue->pending_start;
+	uint8_t pending = fsl_queue->pending_num;
+	uint16_t idx;
 	int ret;
 
-	void *fsl_comp = NULL;
-
-	fsl_comp = fsl_qdma_prep_memcpy(fsl_chan,
-			(dma_addr_t)dst, (dma_addr_t)src,
-			length, NULL, NULL);
-	if (!fsl_comp) {
-		DPAA_QDMA_DP_DEBUG("fsl_comp is NULL");
-		return -1;
+	if (pending >= fsl_queue->pending_max) {
+		DPAA_QDMA_ERR("Too many pending jobs(%d) on queue%d",
+			pending, vchan);
+		return -ENOSPC;
 	}
-	ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags);
+	idx = (start + pending) & (fsl_queue->pending_max - 1);
+
+	fsl_queue->pending_desc[idx].src = src;
+	fsl_queue->pending_desc[idx].dst = dst;
+	fsl_queue->pending_desc[idx].flag =
+		DPAA_QDMA_IDX_FROM_FLAG(flags);
+	fsl_queue->pending_desc[idx].len = length;
+	fsl_queue->pending_num++;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
 
 	return ret;
 }
 
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
-			 const uint16_t nb_cpls, uint16_t *last_idx,
-			 enum rte_dma_status_code *st)
+	const uint16_t nb_cpls, uint16_t *last_idx,
+	enum rte_dma_status_code *st)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		fsl_queue->stats.errors++;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
+
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+			fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
+
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	if (st) {
+		for (i = 0; i < dq_num; i++)
+			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
-
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, st);
-	fsl_queue->stats.completed += intr;
 
-	return intr;
+	return dq_num;
 }
 
-
 static uint16_t
 dpaa_qdma_dequeue(void *dev_private,
-		  uint16_t vchan, const uint16_t nb_cpls,
-		  uint16_t *last_idx, bool *has_error)
+	uint16_t vchan, const uint16_t nb_cpls,
+	uint16_t *last_idx, bool *has_error)
 {
-	struct fsl_qdma_engine *fsl_qdma = (struct fsl_qdma_engine *)dev_private;
-	int id = (int)((fsl_qdma->vchan_map[vchan]) / QDMA_QUEUES);
-	void *block;
-	int intr;
-	void *status = fsl_qdma->status_base;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-
-	intr = qdma_readl_be(status + FSL_QDMA_DEDR);
-	if (intr) {
-		DPAA_QDMA_ERR("DMA transaction error! %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW0R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW1R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW2R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFDW3R);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECFQIDR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr);
-		intr = qdma_readl(status + FSL_QDMA_DECBR);
-		DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr);
-		qdma_writel(0xffffffff,
-			    status + FSL_QDMA_DEDR);
-		intr = qdma_readl(status + FSL_QDMA_DEDR);
-		*has_error = true;
-		fsl_queue->stats.errors++;
-	}
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	struct fsl_qdma_desc *desc_complete[nb_cpls];
+	uint16_t i, dq_num;
 
-	block = fsl_qdma->block_base +
-		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
 
-	intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id, nb_cpls,
-						last_idx, NULL);
-	fsl_queue->stats.completed += intr;
+	*has_error = false;
+	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
+		fsl_queue->block_id);
+	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
+		__func__, dq_num);
 
-	return intr;
+	dq_num = rte_ring_dequeue_burst(fsl_queue->complete_pool,
+			(void **)desc_complete, nb_cpls, NULL);
+	for (i = 0; i < dq_num; i++)
+		last_idx[i] = desc_complete[i]->flag;
+
+	return dq_num;
 }
 
 static int
-dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
-		    struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa_qdma_stats_get(const struct rte_dma_dev *dmadev,
+	uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 	struct rte_dma_stats *stats = &fsl_queue->stats;
 
 	if (size < sizeof(rte_stats))
@@ -903,17 +1036,15 @@ static int
 dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan =
-		&fsl_qdma->chans[fsl_qdma->vchan_map[vchan]];
-	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
 
-	fsl_queue->stats = (struct rte_dma_stats){0};
+	memset(&fsl_queue->stats, 0, sizeof(struct rte_dma_stats));
 
 	return 0;
 }
 
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
-	.dev_info_get		  = dpaa_info_get,
+	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
 	.dev_start                = dpaa_qdma_start,
 	.dev_close                = dpaa_qdma_close,
@@ -926,90 +1057,80 @@ static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	struct fsl_qdma_chan *fsl_chan;
 	uint64_t phys_addr;
-	unsigned int len;
 	int ccsr_qdma_fd;
 	int regs_size;
 	int ret;
-	u32 i;
+	uint32_t i, j, k;
 
-	fsl_qdma->desc_allocated = 0;
-	fsl_qdma->n_chans = VIRT_CHANNELS;
-	fsl_qdma->n_queues = QDMA_QUEUES;
+	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
 
-	len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
-	fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
-	if (!fsl_qdma->chans)
-		return -1;
-
-	len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
-	fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
-	if (!fsl_qdma->status) {
-		rte_free(fsl_qdma->chans);
-		return -1;
-	}
-
-	for (i = 0; i < fsl_qdma->num_blocks; i++) {
-		rte_atomic32_init(&wait_task[i]);
-		fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
-		if (!fsl_qdma->status[i])
-			goto err;
-	}
-
 	ccsr_qdma_fd = open("/dev/mem", O_RDWR);
 	if (unlikely(ccsr_qdma_fd < 0)) {
 		DPAA_QDMA_ERR("Can not open /dev/mem for qdma CCSR map");
-		goto err;
+		return ccsr_qdma_fd;
 	}
 
-	regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 	phys_addr = QDMA_CCSR_BASE;
-	fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
-					 PROT_WRITE, MAP_SHARED,
-					 ccsr_qdma_fd, phys_addr);
+	fsl_qdma->reg_base = mmap(NULL, regs_size,
+		PROT_READ | PROT_WRITE, MAP_SHARED,
+		ccsr_qdma_fd, phys_addr);
 
 	close(ccsr_qdma_fd);
-	if (fsl_qdma->ctrl_base == MAP_FAILED) {
-		DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64
-		       "size %d", phys_addr, regs_size);
-		goto err;
+	if (fsl_qdma->reg_base == MAP_FAILED) {
+		DPAA_QDMA_ERR("Map qdma reg: Phys(0x%"PRIx64"), size(%d)",
+			phys_addr, regs_size);
+		return -ENOMEM;
 	}
 
-	fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
-	fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
-
-	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
-	if (!fsl_qdma->queue) {
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+	fsl_qdma->ctrl_base =
+		fsl_qdma->reg_base + QDMA_CTRL_REGION_OFFSET;
+	fsl_qdma->status_base =
+		fsl_qdma->reg_base + QDMA_STATUS_REGION_OFFSET;
+	fsl_qdma->block_base =
+		fsl_qdma->status_base + QDMA_STATUS_REGION_SIZE;
+
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		ret = fsl_qdma_prep_status_queue(fsl_qdma, i);
+		if (ret)
+			goto mem_free;
 	}
 
-	for (i = 0; i < fsl_qdma->n_chans; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-
-		fsl_chan->qdma = fsl_qdma;
-		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
-							fsl_qdma->num_blocks);
-		fsl_chan->free = true;
+	k = 0;
+	for (i = 0; i < QDMA_QUEUES; i++) {
+		for (j = 0; j < QDMA_BLOCKS; j++) {
+			ret = fsl_qdma_alloc_queue_resources(fsl_qdma, i, j);
+			if (ret)
+				goto mem_free;
+			fsl_qdma->cmd_queues[j][i].channel_id = k;
+			k++;
+		}
 	}
 
 	ret = fsl_qdma_reg_init(fsl_qdma);
 	if (ret) {
 		DPAA_QDMA_ERR("Can't Initialize the qDMA engine.");
-		munmap(fsl_qdma->ctrl_base, regs_size);
-		goto err;
+		goto mem_free;
 	}
 
 	return 0;
 
-err:
-	rte_free(fsl_qdma->chans);
-	rte_free(fsl_qdma->status);
+mem_free:
+	for (i = 0; i < fsl_qdma->num_blocks; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
+
+	for (i = 0; i < fsl_qdma->num_blocks; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
+	}
 
-	return -1;
+	munmap(fsl_qdma->ctrl_base, regs_size);
+
+	return ret;
 }
 
 static int
@@ -1052,17 +1173,20 @@ dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
 {
 	struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
 	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
-	int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+	uint32_t i, j, regs_size;
+
+	regs_size = fsl_qdma->block_offset * fsl_qdma->num_blocks;
+	regs_size += (QDMA_CTRL_REGION_SIZE + QDMA_STATUS_REGION_SIZE);
 
-	for (i = 0; i < max; i++) {
-		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+	for (i = 0; i < QDMA_BLOCKS; i++)
+		fsl_qdma_free_stq_res(&fsl_qdma->stat_queues[i]);
 
-		if (fsl_chan->free == false)
-			dma_release(fsl_chan);
+	for (i = 0; i < QDMA_BLOCKS; i++) {
+		for (j = 0; j < QDMA_QUEUES; j++)
+			fsl_qdma_free_cmdq_res(&fsl_qdma->cmd_queues[i][j]);
 	}
 
-	rte_free(fsl_qdma->status);
-	rte_free(fsl_qdma->chans);
+	munmap(fsl_qdma->ctrl_base, regs_size);
 
 	(void)rte_dma_pmd_release(dpaa_dev->device.name);
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 7e9e76e21a..75c014f32f 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2021 NXP
+ * Copyright 2021-2024 NXP
  */
 
 #ifndef _DPAA_QDMA_H_
@@ -11,7 +11,6 @@
 #define BIT(nr)		(1UL << (nr))
 #endif
 
-#define CORE_NUMBER 4
 #define RETRIES	5
 
 #ifndef GENMASK
@@ -20,6 +19,14 @@
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 #endif
 
+#define QDMA_CTRL_REGION_OFFSET 0
+#define QDMA_CTRL_REGION_SIZE 0x10000
+#define QDMA_STATUS_REGION_OFFSET \
+	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
+#define QDMA_STATUS_REGION_SIZE 0x10000
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
 #define FSL_QDMA_DEDR			0xe04
@@ -54,15 +61,16 @@
 #define FSL_QDMA_QUEUE_MAX		8
 
 #define FSL_QDMA_BCQMR_EN		0x80000000
-#define FSL_QDMA_BCQMR_EI_BE		0x40
+#define FSL_QDMA_BCQMR_EI		0x40000000
+
 #define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
 #define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)
 
 #define FSL_QDMA_BCQSR_QF_XOFF_BE	0x1000100
 
 #define FSL_QDMA_BSQMR_EN		0x80000000
-#define FSL_QDMA_BSQMR_DI_BE		0x40
 #define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)
+#define FSL_QDMA_BSQMR_DI		0xc0
 
 #define FSL_QDMA_BSQSR_QE_BE		0x200
 
@@ -75,23 +83,14 @@
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
 
+#define FSL_QDMA_COMP_SG_FORMAT		0x1
+
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
 #define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
 #define FSL_QDMA_CMD_LWC_OFFSET		16
 
-#define QDMA_CCDF_STATUS		20
-#define QDMA_CCDF_OFFSET		20
-#define QDMA_CCDF_MASK			GENMASK(28, 20)
-#define QDMA_CCDF_FOTMAT		BIT(29)
-#define QDMA_CCDF_SER			BIT(30)
-
-#define QDMA_SG_FIN			BIT(30)
-#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
-
-#define COMMAND_QUEUE_OVERFLOW		10
-
 /* qdma engine attribute */
 #define QDMA_QUEUE_SIZE			64
 #define QDMA_STATUS_SIZE		64
@@ -101,6 +100,7 @@
 #define QDMA_BLOCKS			4
 #define QDMA_QUEUES			8
 #define QDMA_DELAY			1000
+#define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
 #ifdef QDMA_BIG_ENDIAN
@@ -118,89 +118,145 @@
 #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)			\
 	(((fsl_qdma_engine)->block_offset) * (x))
 
-typedef void (*dma_call_back)(void *params);
-
 /* qDMA Command Descriptor Formats */
-struct fsl_qdma_format {
-	__le32 status; /* ser, status */
-	__le32 cfg;	/* format, offset */
-	union {
-		struct {
-			__le32 addr_lo;	/* low 32-bits of 40-bit address */
-			u8 addr_hi;	/* high 8-bits of 40-bit address */
-			u8 __reserved1[2];
-			u8 cfg8b_w1; /* dd, queue */
-		};
-		__le64 data;
-	};
-};
+struct fsl_qdma_comp_cmd_desc {
+	uint8_t status;
+	uint32_t rsv0:22;
+	uint32_t ser:1;
+	uint32_t rsv1:21;
+	uint32_t offset:9;
+	uint32_t format:3;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint16_t rsv3;
+	uint8_t queue:3;
+	uint8_t rsv4:3;
+	uint8_t dd:2;
+} __rte_packed;
+
+struct fsl_qdma_comp_sg_desc {
+	uint32_t offset:13;
+	uint32_t rsv0:19;
+	uint32_t length:30;
+	uint32_t final:1;
+	uint32_t extion:1;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+	uint32_t rsv1:24;
+} __rte_packed;
 
-/* qDMA Source Descriptor Format */
 struct fsl_qdma_sdf {
-	__le32 rev3;
-	__le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-	__le32 rev5;
-	__le32 cmd;
-};
+	uint32_t rsv0;
+	uint32_t ssd:12;
+	uint32_t sss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint32_t rsv3:17;
+	uint32_t prefetch:1;
+	uint32_t rsv4:1;
+	uint32_t ssen:1;
+	uint32_t rthrotl:4;
+	uint32_t sqos:3;
+	uint32_t ns:1;
+	uint32_t srttype:4;
+} __rte_packed;
 
-/* qDMA Destination Descriptor Format */
 struct fsl_qdma_ddf {
-	__le32 rev1;
-	__le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-	__le32 rev3;
-	__le32 cmd;
+	uint32_t rsv0;
+	uint32_t dsd:12;
+	uint32_t dss:12;
+	uint32_t rsv1:8;
+	uint32_t rsv2;
+
+	uint16_t rsv3;
+	uint32_t lwc:2;
+	uint32_t rsv4:1;
+	uint32_t dsen:1;
+	uint32_t wthrotl:4;
+	uint32_t dqos:3;
+	uint32_t ns:1;
+	uint32_t dwttype:4;
+} __rte_packed;
+
+struct fsl_qdma_df {
+	struct fsl_qdma_sdf sdf;
+	struct fsl_qdma_ddf ddf;
 };
 
-struct fsl_qdma_chan {
-	struct fsl_qdma_engine	*qdma;
-	struct fsl_qdma_queue	*queue;
-	bool			free;
-	struct list_head	list;
+#define FSL_QDMA_SG_MAX_ENTRY 64
+#define FSL_QDMA_MAX_DESC_NUM (FSL_QDMA_SG_MAX_ENTRY * QDMA_QUEUE_SIZE)
+struct fsl_qdma_cmpd_ft {
+	struct fsl_qdma_comp_sg_desc desc_buf;
+	struct fsl_qdma_comp_sg_desc desc_sbuf;
+	struct fsl_qdma_comp_sg_desc desc_dbuf;
+	uint64_t cache_align[2];
+	struct fsl_qdma_comp_sg_desc desc_ssge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_comp_sg_desc desc_dsge[FSL_QDMA_SG_MAX_ENTRY];
+	struct fsl_qdma_df df;
+	uint64_t phy_ssge;
+	uint64_t phy_dsge;
+	uint64_t phy_df;
+} __rte_packed;
+
+#define DPAA_QDMA_IDX_FROM_FLAG(flag) \
+	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
+
+struct fsl_qdma_desc {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint64_t flag;
+	uint64_t len;
 };
 
 struct fsl_qdma_queue {
-	struct fsl_qdma_format	*virt_head;
-	struct list_head	comp_used;
-	struct list_head	comp_free;
-	dma_addr_t		bus_addr;
-	u32			n_cq;
-	u32			id;
-	u32			count;
-	u32			pending;
-	struct fsl_qdma_format	*cq;
-	void			*block_base;
-	struct rte_dma_stats	stats;
+	int used;
+	struct fsl_qdma_cmpd_ft **ft;
+	uint16_t ci;
+	struct rte_ring *complete_burst;
+	struct rte_ring *complete_desc;
+	struct rte_ring *complete_pool;
+	uint16_t n_cq;
+	uint8_t block_id;
+	uint8_t queue_id;
+	uint8_t channel_id;
+	void *block_vir;
+	uint32_t le_cqmr;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	uint16_t desc_in_hw[QDMA_QUEUE_SIZE];
+	struct rte_dma_stats stats;
+	struct fsl_qdma_desc *pending_desc;
+	uint16_t pending_max;
+	uint16_t pending_start;
+	uint16_t pending_num;
+	uint16_t complete_start;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
-struct fsl_qdma_comp {
-	dma_addr_t		bus_addr;
-	dma_addr_t		desc_bus_addr;
-	void			*virt_addr;
-	int			index;
-	void			*desc_virt_addr;
-	struct fsl_qdma_chan	*qchan;
-	dma_call_back		call_back_func;
-	void			*params;
-	struct list_head	list;
+struct fsl_qdma_status_queue {
+	uint16_t n_cq;
+	uint16_t complete;
+	uint8_t block_id;
+	void *block_vir;
+	struct fsl_qdma_comp_cmd_desc *cq;
+	struct rte_dma_stats stats;
+	dma_addr_t bus_addr;
+	void *engine;
 };
 
 struct fsl_qdma_engine {
-	int			desc_allocated;
-	void			*ctrl_base;
-	void			*status_base;
-	void			*block_base;
-	u32			n_chans;
-	u32			n_queues;
-	int			error_irq;
-	struct fsl_qdma_queue	*queue;
-	struct fsl_qdma_queue	**status;
-	struct fsl_qdma_chan	*chans;
-	u32			num_blocks;
-	u8			free_block_id;
-	u32			vchan_map[4];
-	int			block_offset;
+	void *reg_base;
+	void *ctrl_base;
+	void *status_base;
+	void *block_base;
+	uint32_t n_queues;
+	uint8_t block_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue cmd_queues[QDMA_BLOCKS][QDMA_QUEUES];
+	struct fsl_qdma_status_queue stat_queues[QDMA_BLOCKS];
+	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
+	uint32_t num_blocks;
+	int block_offset;
 };
 
-static rte_atomic32_t wait_task[CORE_NUMBER];
-
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 09/15] dma/dpaa: support burst capacity API
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (7 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 08/15] dma/dpaa: refactor driver Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 10/15] dma/dpaa: add silent mode support Gagandeep Singh
                                     ` (5 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

This patch improves the dpaa qdma driver and adds the
dpaa_qdma_burst_capacity API, which returns the remaining
space in the descriptor ring.
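
Purely as an illustration (not part of this patch), an application
could consult the new capacity through the generic dmadev API before
enqueueing; dev_id, vchan and the addresses below are placeholders:

#include <errno.h>
#include <rte_dmadev.h>

static int
enqueue_if_room(int16_t dev_id, uint16_t vchan,
	rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	/* Remaining descriptor-ring space reported by the PMD. */
	uint16_t room = rte_dma_burst_capacity(dev_id, vchan);

	if (room == 0)
		return -ENOSPC;

	/* Enqueue one copy and submit it immediately. */
	return rte_dma_copy(dev_id, vchan, src, dst, len,
			RTE_DMA_OP_FLAG_SUBMIT);
}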

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index d162d84c45..bb0d1a2bd4 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1043,6 +1043,15 @@ dpaa_qdma_stats_reset(struct rte_dma_dev *dmadev, uint16_t vchan)
 	return 0;
 }
 
+static uint16_t
+dpaa_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
+{
+	const struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+
+	return fsl_queue->pending_max - fsl_queue->pending_num;
+}
+
 static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.dev_info_get		  = dpaa_qdma_info_get,
 	.dev_configure            = dpaa_qdma_configure,
@@ -1156,6 +1165,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
+	dmadev->fp_obj->burst_capacity = dpaa_qdma_burst_capacity;
 
 	/* Invoke PMD device initialization function */
 	ret = dpaa_qdma_init(dmadev);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 10/15] dma/dpaa: add silent mode support
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (8 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:13                   ` [v7 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
                                     ` (4 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add silent mode support. When the application enables silent mode
via the enable_silent flag in the device configuration, the driver
does not track completion status and the dequeue APIs return no
completions.
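
A minimal configuration sketch (illustration only, not part of this
patch) showing how an application requests silent mode through the
generic dmadev API; dev_id is a placeholder:

#include <stdbool.h>
#include <rte_dmadev.h>

static int
configure_silent(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };

	/* Request silent mode only when the device advertises it. */
	if (rte_dma_info_get(dev_id, &info) == 0 &&
	    (info.dev_capa & RTE_DMA_CAPA_SILENT))
		conf.enable_silent = true;

	return rte_dma_configure(dev_id, &conf);
}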

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 46 ++++++++++++++++++++++++++++++++----
 drivers/dma/dpaa/dpaa_qdma.h |  1 +
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index bb0d1a2bd4..3ae87fdf80 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -119,6 +119,7 @@ dma_pool_alloc(char *nm, int size, int aligned, dma_addr_t *phy_addr)
 static int
 fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 {
+	struct fsl_qdma_engine *fsl_qdma = queue->engine;
 	struct fsl_qdma_sdf *sdf;
 	struct fsl_qdma_ddf *ddf;
 	struct fsl_qdma_comp_cmd_desc *ccdf;
@@ -173,7 +174,8 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 		ccdf = &queue->cq[i];
 		qdma_desc_addr_set64(ccdf, phy_ft);
 		ccdf->format = FSL_QDMA_COMP_SG_FORMAT;
-
+		if (!fsl_qdma->is_silent)
+			ccdf->ser = 1;
 		ccdf->queue = queue->queue_id;
 	}
 	queue->ci = 0;
@@ -579,9 +581,12 @@ static int
 fsl_qdma_enqueue_desc_to_ring(struct fsl_qdma_queue *fsl_queue,
 	uint16_t num)
 {
+	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 	uint16_t i, idx, start, dq;
 	int ret, dq_cnt;
 
+	if (fsl_qdma->is_silent)
+		return 0;
 
 	fsl_queue->desc_in_hw[fsl_queue->ci] = num;
 eq_again:
@@ -626,17 +631,34 @@ static int
 fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 {
 	int overflow = 0;
+	uint32_t reg;
 	uint16_t blk_drain, check_num, drain_num;
+	uint8_t *block = fsl_queue->block_vir;
 	const struct rte_dma_stats *st = &fsl_queue->stats;
 	struct fsl_qdma_engine *fsl_qdma = fsl_queue->engine;
 
 	check_num = 0;
 overflow_check:
-	overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
+	if (fsl_qdma->is_silent) {
+		reg = qdma_readl_be(block +
+			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
+		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
+			1 : 0;
+	} else {
+		overflow = (fsl_qdma_queue_bd_in_hw(fsl_queue) >=
 			QDMA_QUEUE_CR_WM) ? 1 : 0;
+	}
 
-	if (likely(!overflow))
+	if (likely(!overflow)) {
 		return 0;
+	} else if (fsl_qdma->is_silent) {
+		check_num++;
+		if (check_num >= 10000) {
+			DPAA_QDMA_WARN("Waiting for HW complete in silent mode");
+			check_num = 0;
+		}
+		goto overflow_check;
+	}
 
 	DPAA_QDMA_DP_DEBUG("TC%d/Q%d submitted(%"PRIu64")-completed(%"PRIu64") >= %d",
 		fsl_queue->block_id, fsl_queue->queue_id,
@@ -881,10 +903,13 @@ dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma,
 }
 
 static int
-dpaa_qdma_configure(__rte_unused struct rte_dma_dev *dmadev,
-	__rte_unused const struct rte_dma_conf *dev_conf,
+dpaa_qdma_configure(struct rte_dma_dev *dmadev,
+	const struct rte_dma_conf *dev_conf,
 	__rte_unused uint32_t conf_sz)
 {
+	struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+
+	fsl_qdma->is_silent = dev_conf->enable_silent;
 	return 0;
 }
 
@@ -970,6 +995,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
+
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
 			fsl_queue->block_id);
 	DPAA_QDMA_DP_DEBUG("%s: block dq(%d)",
@@ -999,6 +1030,11 @@ dpaa_qdma_dequeue(void *dev_private,
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
+	if (unlikely(fsl_qdma->is_silent)) {
+		DPAA_QDMA_WARN("Can't dq in silent mode");
+
+		return 0;
+	}
 
 	*has_error = false;
 	dq_num = dpaa_qdma_block_dequeue(fsl_qdma,
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 75c014f32f..9b69db517e 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -257,6 +257,7 @@ struct fsl_qdma_engine {
 	struct fsl_qdma_queue *chan[QDMA_BLOCKS * QDMA_QUEUES];
 	uint32_t num_blocks;
 	int block_offset;
+	int is_silent;
 };
 
 #endif /* _DPAA_QDMA_H_ */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 11/15] dma/dpaa: add workaround for ERR050757
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (9 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 10/15] dma/dpaa: add silent mode support Gagandeep Singh
@ 2024-10-15  7:13                   ` Gagandeep Singh
  2024-10-15  7:14                   ` [v7 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
                                     ` (3 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:13 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

ERR050757 on LS104x indicates:

For outbound PCIe read transactions, a completion buffer is used
to store the PCIe completions until the data is passed back to the
initiator. At most 16 outstanding transactions are allowed and the
maximum read request is 256 bytes. The completion buffer inside the
controller needs to be at least 4 KB, but the PCIe controller only
has 3 KB of buffer. If the size of the pending outbound read
transactions exceeds 3 KB, the PCIe controller may drop the
incoming completions without notifying the initiator, leaving the
transactions unfinished. All subsequent outbound reads to PCIe are
then blocked permanently.
To avoid a qDMA hang while it waits for data that was silently
dropped, set stride mode for qDMA.
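
For reference, the stride programming added below can be restated as a
small helper; the fsl_qdma_sdf fields and the 128-byte
FSL_QDMA_CMD_SS_ERR050757_LEN constant come from this driver's header:

static void
qdma_sdf_set_err050757_stride(struct fsl_qdma_sdf *sdf, uint32_t len)
{
	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
		/* Split large PCIe reads into 128-byte strides. */
		sdf->ssen = 1;
		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
	} else {
		sdf->ssen = 0;
		sdf->sss = 0;
		sdf->ssd = 0;
	}
}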

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       |  3 ++-
 doc/guides/dmadevs/dpaa.rst  |  2 ++
 drivers/dma/dpaa/dpaa_qdma.c | 38 +++++++++++++++++++++++++++++++++---
 drivers/dma/dpaa/dpaa_qdma.h | 19 +++++++-----------
 4 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index a3bdcf1aa2..e49a5f64f0 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -488,7 +488,8 @@ soc_dpaa = {
         ['RTE_MACHINE', '"dpaa"'],
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
-        ['RTE_MAX_NUMA_NODES', 1]
+        ['RTE_MAX_NUMA_NODES', 1],
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index f99bfc6087..746919ec6b 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -42,6 +42,8 @@ Compilation
 For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
+- ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+
 Initialization
 --------------
 
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 3ae87fdf80..ffc2a73a17 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,7 +167,6 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
-
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -449,8 +448,9 @@ fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
 
 			/* Initialize the queue mode. */
 			reg = FSL_QDMA_BCQMR_EN;
-			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
-			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2_qthld(temp->n_cq));
+			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2_qsize(temp->n_cq));
+			temp->le_cqmr = reg;
 			qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
 		}
 
@@ -698,6 +698,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 	struct fsl_qdma_comp_sg_desc *csgf_src, *csgf_dest;
 	struct fsl_qdma_cmpd_ft *ft;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 	ret = fsl_qdma_enqueue_overflow(fsl_queue);
 	if (unlikely(ret))
@@ -705,6 +708,19 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 
 	ft = fsl_queue->ft[fsl_queue->ci];
 
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	csgf_src = &ft->desc_sbuf;
 	csgf_dest = &ft->desc_dbuf;
 	qdma_desc_sge_addr_set64(csgf_src, src);
@@ -737,6 +753,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	uint32_t total_len;
 	uint16_t start, idx, num, i, next_idx;
 	int ret;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	struct fsl_qdma_sdf *sdf;
+#endif
 
 eq_sg:
 	total_len = 0;
@@ -802,6 +821,19 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 	ft->desc_dsge[num - 1].final = 1;
 	csgf_src->length = total_len;
 	csgf_dest->length = total_len;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050757
+	sdf = &ft->df.sdf;
+	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
+		sdf->ssen = 1;
+		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
+		sdf->ssd = FSL_QDMA_CMD_SS_ERR050757_LEN;
+	} else {
+		sdf->ssen = 0;
+		sdf->sss = 0;
+		sdf->ssd = 0;
+	}
+#endif
 	ret = fsl_qdma_enqueue_desc_to_ring(fsl_queue, num);
 	if (ret)
 		return ret;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 9b69db517e..171c093117 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -77,8 +77,6 @@
 #define FSL_QDMA_DMR_DQD		0x40000000
 #define FSL_QDMA_DSR_DB			0x80000000
 
-#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
-#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
 #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
 #define FSL_QDMA_QUEUE_NUM_MAX		8
@@ -88,18 +86,15 @@
 #define FSL_QDMA_CMD_RWTTYPE		0x4
 #define FSL_QDMA_CMD_LWC		0x2
 
-#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
-#define FSL_QDMA_CMD_LWC_OFFSET		16
+#define FSL_QDMA_CMD_SS_ERR050757_LEN 128
 
 /* qdma engine attribute */
-#define QDMA_QUEUE_SIZE			64
-#define QDMA_STATUS_SIZE		64
-#define QDMA_CCSR_BASE			0x8380000
-#define VIRT_CHANNELS			32
-#define QDMA_BLOCK_OFFSET		0x10000
-#define QDMA_BLOCKS			4
-#define QDMA_QUEUES			8
-#define QDMA_DELAY			1000
+#define QDMA_QUEUE_SIZE FSL_QDMA_CIRCULAR_DESC_SIZE_MIN
+#define QDMA_STATUS_SIZE QDMA_QUEUE_SIZE
+#define QDMA_CCSR_BASE 0x8380000
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
 #define QDMA_QUEUE_CR_WM 32
 
 #define QDMA_BIG_ENDIAN			1
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 12/15] dma/dpaa: qdma stall workaround for ERR050265
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (10 preceding siblings ...)
  2024-10-15  7:13                   ` [v7 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
@ 2024-10-15  7:14                   ` Gagandeep Singh
  2024-10-15  7:14                   ` [v7 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
                                     ` (2 subsequent siblings)
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:14 UTC (permalink / raw)
  To: dev, Wathsala Vithanage, Bruce Richardson, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

A non-prefetchable read setting in the source descriptor may be
required for targets other than local memory. A prefetchable read
setting offers better performance for misaligned transfers, in the
form of fewer transactions, and should be used whenever possible.
This patch also fixes a QDMA stall issue caused by unaligned
transactions.
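
For reference, the descriptor change below amounts to the following
helper (sdf is the driver's struct fsl_qdma_sdf source descriptor):

static void
qdma_sdf_set_prefetch(struct fsl_qdma_sdf *sdf)
{
#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
	/* Prefer prefetchable reads to avoid stalls on unaligned copies. */
	sdf->prefetch = 1;
#else
	(void)sdf;
#endif
}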

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 config/arm/meson.build       | 3 ++-
 doc/guides/dmadevs/dpaa.rst  | 1 +
 drivers/dma/dpaa/dpaa_qdma.c | 9 +++++++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/config/arm/meson.build b/config/arm/meson.build
index e49a5f64f0..4cb5e74c72 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -489,7 +489,8 @@ soc_dpaa = {
         ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false],
         ['RTE_MAX_LCORE', 16],
         ['RTE_MAX_NUMA_NODES', 1],
-	['RTE_DMA_DPAA_ERRATA_ERR050757', true]
+	['RTE_DMA_DPAA_ERRATA_ERR050757', true],
+	['RTE_DMA_DPAA_ERRATA_ERR050265', true]
     ],
     'numa': false
 }
diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 746919ec6b..8a7c0befc3 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -43,6 +43,7 @@ For builds using ``meson`` and ``ninja``, the driver will be built when the
 target platform is dpaa-based. No additional compilation steps are necessary.
 
 - ``RTE_DMA_DPAA_ERRATA_ERR050757`` - enable software workaround for Errata-A050757
+- ``RTE_DMA_DPAA_ERRATA_ERR050265`` - enable software workaround for Errata-A050265
 
 Initialization
 --------------
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index ffc2a73a17..6a6cfa85cc 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -167,6 +167,9 @@ fsl_qdma_pre_comp_sd_desc(struct fsl_qdma_queue *queue)
 
 		/* Descriptor Buffer */
 		sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+		sdf->prefetch = 1;
+#endif
 		ddf->dwttype = FSL_QDMA_CMD_RWTTYPE;
 		ddf->lwc = FSL_QDMA_CMD_LWC;
 
@@ -711,6 +714,9 @@ fsl_qdma_enqueue_desc_single(struct fsl_qdma_queue *fsl_queue,
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
@@ -824,6 +830,9 @@ fsl_qdma_enqueue_desc_sg(struct fsl_qdma_queue *fsl_queue)
 #ifdef RTE_DMA_DPAA_ERRATA_ERR050757
 	sdf = &ft->df.sdf;
 	sdf->srttype = FSL_QDMA_CMD_RWTTYPE;
+#ifdef RTE_DMA_DPAA_ERRATA_ERR050265
+	sdf->prefetch = 1;
+#endif
 	if (total_len > FSL_QDMA_CMD_SS_ERR050757_LEN) {
 		sdf->ssen = 1;
 		sdf->sss = FSL_QDMA_CMD_SS_ERR050757_LEN;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 13/15] dma/dpaa: add Scatter Gather support
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (11 preceding siblings ...)
  2024-10-15  7:14                   ` [v7 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
@ 2024-10-15  7:14                   ` Gagandeep Singh
  2024-10-15  7:14                   ` [v7 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
  2024-10-15  7:14                   ` [v7 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:14 UTC (permalink / raw)
  To: dev, Sachin Saxena; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add support for the copy_sg operation (scatter-gather copy).
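
A minimal usage sketch (illustration only; dev_id, vchan and the IOVAs
are placeholders) of the generic copy_sg call this patch wires up:

#include <rte_dmadev.h>

static int
copy_two_segments(int16_t dev_id, uint16_t vchan,
	rte_iova_t s0, rte_iova_t s1, rte_iova_t d0, rte_iova_t d1)
{
	/* The driver requires matching src/dst counts and lengths. */
	struct rte_dma_sge src[2] = {
		{ .addr = s0, .length = 512 },
		{ .addr = s1, .length = 512 },
	};
	struct rte_dma_sge dst[2] = {
		{ .addr = d0, .length = 512 },
		{ .addr = d1, .length = 512 },
	};

	return rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 2,
			RTE_DMA_OP_FLAG_SUBMIT);
}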

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/dma/dpaa/dpaa_qdma.c | 55 ++++++++++++++++++++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h | 10 ++++++-
 2 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 6a6cfa85cc..2bde91f563 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -1025,6 +1025,60 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan,
 	return ret;
 }
 
+static int
+dpaa_qdma_copy_sg(void *dev_private,
+	uint16_t vchan,
+	const struct rte_dma_sge *src,
+	const struct rte_dma_sge *dst,
+	uint16_t nb_src, uint16_t nb_dst,
+	uint64_t flags)
+{
+	int ret;
+	uint16_t i, start, idx;
+	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	const uint16_t *idx_addr = NULL;
+
+	if (unlikely(nb_src != nb_dst)) {
+		DPAA_QDMA_ERR("%s: nb_src(%d) != nb_dst(%d) on  queue%d",
+			__func__, nb_src, nb_dst, vchan);
+		return -EINVAL;
+	}
+
+	if ((fsl_queue->pending_num + nb_src) > FSL_QDMA_SG_MAX_ENTRY) {
+		DPAA_QDMA_ERR("Too many pending jobs on queue%d",
+			vchan);
+		return -ENOSPC;
+	}
+	start = fsl_queue->pending_start + fsl_queue->pending_num;
+	start = start & (fsl_queue->pending_max - 1);
+	idx = start;
+
+	idx_addr = DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flags);
+
+	for (i = 0; i < nb_src; i++) {
+		if (unlikely(src[i].length != dst[i].length)) {
+			DPAA_QDMA_ERR("src.len(%d) != dst.len(%d)",
+				src[i].length, dst[i].length);
+			return -EINVAL;
+		}
+		idx = (start + i) & (fsl_queue->pending_max - 1);
+		fsl_queue->pending_desc[idx].src = src[i].addr;
+		fsl_queue->pending_desc[idx].dst = dst[i].addr;
+		fsl_queue->pending_desc[idx].len = dst[i].length;
+		fsl_queue->pending_desc[idx].flag = idx_addr[i];
+	}
+	fsl_queue->pending_num += nb_src;
+
+	if (!(flags & RTE_DMA_OP_FLAG_SUBMIT))
+		return idx;
+
+	ret = fsl_qdma_enqueue_desc(fsl_queue);
+	if (!ret)
+		return fsl_queue->pending_start;
+
+	return ret;
+}
 
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
@@ -1239,6 +1293,7 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
 	dmadev->device = &dpaa_dev->device;
 	dmadev->fp_obj->dev_private = dmadev->data->dev_private;
 	dmadev->fp_obj->copy = dpaa_qdma_enqueue;
+	dmadev->fp_obj->copy_sg = dpaa_qdma_copy_sg;
 	dmadev->fp_obj->submit = dpaa_qdma_submit;
 	dmadev->fp_obj->completed = dpaa_qdma_dequeue;
 	dmadev->fp_obj->completed_status = dpaa_qdma_dequeue_status;
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 171c093117..1e820d0207 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -24,8 +24,13 @@
 #define QDMA_STATUS_REGION_OFFSET \
 	(QDMA_CTRL_REGION_OFFSET + QDMA_CTRL_REGION_SIZE)
 #define QDMA_STATUS_REGION_SIZE 0x10000
-#define DPAA_QDMA_COPY_IDX_OFFSET 8
+
 #define DPAA_QDMA_FLAGS_INDEX RTE_BIT64(63)
+#define DPAA_QDMA_COPY_IDX_OFFSET 8
+#define DPAA_QDMA_SG_IDX_ADDR_ALIGN \
+	RTE_BIT64(DPAA_QDMA_COPY_IDX_OFFSET)
+#define DPAA_QDMA_SG_IDX_ADDR_MASK \
+	(DPAA_QDMA_SG_IDX_ADDR_ALIGN - 1)
 
 #define FSL_QDMA_DMR			0x0
 #define FSL_QDMA_DSR			0x4
@@ -194,6 +199,9 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
+	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
+
 #define DPAA_QDMA_IDX_FROM_FLAG(flag) \
 	((flag) >> DPAA_QDMA_COPY_IDX_OFFSET)
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 14/15] dma/dpaa: add DMA error checks
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (12 preceding siblings ...)
  2024-10-15  7:14                   ` [v7 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
@ 2024-10-15  7:14                   ` Gagandeep Singh
  2024-10-15  7:14                   ` [v7 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:14 UTC (permalink / raw)
  To: dev, Sachin Saxena, Hemant Agrawal; +Cc: Jun Yang

From: Jun Yang <jun.yang@nxp.com>

Add user-configurable DMA error checks, enabled through the
dpaa_dma_err_check devarg.
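
A minimal polling sketch (illustration only; dev_id and vchan are
placeholders) showing how an application observes the errors surfaced
by these checks through the generic completion APIs:

#include <stdbool.h>
#include <rte_dmadev.h>

static uint16_t
poll_completions(int16_t dev_id, uint16_t vchan)
{
	enum rte_dma_status_code st[8];
	uint16_t last_idx;
	bool has_error = false;
	uint16_t n;

	n = rte_dma_completed(dev_id, vchan, 8, &last_idx, &has_error);
	if (has_error) {
		/* On error, fetch per-operation status codes instead. */
		n = rte_dma_completed_status(dev_id, vchan, 8,
				&last_idx, st);
	}

	return n;
}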

Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/dmadevs/dpaa.rst      |   6 ++
 drivers/dma/dpaa/dpaa_qdma.c     | 135 ++++++++++++++++++++++++++++++-
 drivers/dma/dpaa/dpaa_qdma.h     |  42 ++++++++++
 drivers/net/dpaa2/dpaa2_ethdev.c |   2 +-
 4 files changed, 183 insertions(+), 2 deletions(-)

diff --git a/doc/guides/dmadevs/dpaa.rst b/doc/guides/dmadevs/dpaa.rst
index 8a7c0befc3..a60457229a 100644
--- a/doc/guides/dmadevs/dpaa.rst
+++ b/doc/guides/dmadevs/dpaa.rst
@@ -69,3 +69,9 @@ Platform Requirement
 
 DPAA DMA driver for DPDK can only work on NXP SoCs
 as listed in the `Supported DPAA SoCs`_.
+
+Device Arguments
+----------------
+
+Use dev arg option ``dpaa_dma_err_check=1`` to check DMA errors at
+driver level. usage example: ``dpaa_bus:dpaa_qdma-1,dpaa_dma_err_check=1``
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 2bde91f563..ca349e97b2 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -4,11 +4,15 @@
 
 #include <bus_dpaa_driver.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
 
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
 static uint32_t s_sg_max_entry_sz = 2000;
+static bool s_hw_err_check;
+
+#define DPAA_DMA_ERROR_CHECK "dpaa_dma_err_check"
 
 static inline void
 qdma_desc_addr_set64(struct fsl_qdma_comp_cmd_desc *ccdf, u64 addr)
@@ -642,7 +646,7 @@ fsl_qdma_enqueue_overflow(struct fsl_qdma_queue *fsl_queue)
 
 	check_num = 0;
 overflow_check:
-	if (fsl_qdma->is_silent) {
+	if (fsl_qdma->is_silent || unlikely(s_hw_err_check)) {
 		reg = qdma_readl_be(block +
 			 FSL_QDMA_BCQSR(fsl_queue->queue_id));
 		overflow = (reg & FSL_QDMA_BCQSR_QF_XOFF_BE) ?
@@ -1080,13 +1084,81 @@ dpaa_qdma_copy_sg(void *dev_private,
 	return ret;
 }
 
+static int
+dpaa_qdma_err_handle(struct fsl_qdma_err_reg *reg)
+{
+	struct fsl_qdma_err_reg local;
+	size_t i, offset = 0;
+	char err_msg[512];
+
+	local.dedr_be = rte_read32(&reg->dedr_be);
+	if (!local.dedr_be)
+		return 0;
+	offset = sprintf(err_msg, "ERR detected:");
+	if (local.dedr.ere) {
+		offset += sprintf(&err_msg[offset],
+			" ere(Enqueue rejection error)");
+	}
+	if (local.dedr.dde) {
+		offset += sprintf(&err_msg[offset],
+			" dde(Destination descriptor error)");
+	}
+	if (local.dedr.sde) {
+		offset += sprintf(&err_msg[offset],
+			" sde(Source descriptor error)");
+	}
+	if (local.dedr.cde) {
+		offset += sprintf(&err_msg[offset],
+			" cde(Command descriptor error)");
+	}
+	if (local.dedr.wte) {
+		offset += sprintf(&err_msg[offset],
+			" wte(Write transaction error)");
+	}
+	if (local.dedr.rte) {
+		offset += sprintf(&err_msg[offset],
+			" rte(Read transaction error)");
+	}
+	if (local.dedr.me) {
+		offset += sprintf(&err_msg[offset],
+			" me(Multiple errors of the same type)");
+	}
+	DPAA_QDMA_ERR("%s", err_msg);
+	for (i = 0; i < FSL_QDMA_DECCD_ERR_NUM; i++) {
+		local.deccd_le[FSL_QDMA_DECCD_ERR_NUM - 1 - i] =
+			QDMA_IN(&reg->deccd_le[i]);
+	}
+	local.deccqidr_be = rte_read32(&reg->deccqidr_be);
+	local.decbr = rte_read32(&reg->decbr);
+
+	offset = sprintf(err_msg, "ERR command:");
+	offset += sprintf(&err_msg[offset],
+		" status: %02x, ser: %d, offset:%d, fmt: %02x",
+		local.err_cmd.status, local.err_cmd.ser,
+		local.err_cmd.offset, local.err_cmd.format);
+	offset += sprintf(&err_msg[offset],
+		" address: 0x%"PRIx64", queue: %d, dd: %02x",
+		(uint64_t)local.err_cmd.addr_hi << 32 |
+		local.err_cmd.addr_lo,
+		local.err_cmd.queue, local.err_cmd.dd);
+	DPAA_QDMA_ERR("%s", err_msg);
+	DPAA_QDMA_ERR("ERR command block: %d, queue: %d",
+		local.deccqidr.block, local.deccqidr.queue);
+
+	rte_write32(local.dedr_be, &reg->dedr_be);
+
+	return -EIO;
+}
+
 static uint16_t
 dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 	const uint16_t nb_cpls, uint16_t *last_idx,
 	enum rte_dma_status_code *st)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1111,6 +1183,12 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan,
 			st[i] = RTE_DMA_STATUS_SUCCESSFUL;
 	}
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err)
+			fsl_queue->stats.errors++;
+	}
 
 	return dq_num;
 }
@@ -1121,7 +1199,9 @@ dpaa_qdma_dequeue(void *dev_private,
 	uint16_t *last_idx, bool *has_error)
 {
 	struct fsl_qdma_engine *fsl_qdma = dev_private;
+	int err;
 	struct fsl_qdma_queue *fsl_queue = fsl_qdma->chan[vchan];
+	void *status = fsl_qdma->status_base;
 	struct fsl_qdma_desc *desc_complete[nb_cpls];
 	uint16_t i, dq_num;
 
@@ -1142,6 +1222,16 @@ dpaa_qdma_dequeue(void *dev_private,
 	for (i = 0; i < dq_num; i++)
 		last_idx[i] = desc_complete[i]->flag;
 
+	if (s_hw_err_check) {
+		err = dpaa_qdma_err_handle(status +
+			FSL_QDMA_ERR_REG_STATUS_OFFSET);
+		if (err) {
+			if (has_error)
+				*has_error = true;
+			fsl_queue->stats.errors++;
+		}
+	}
+
 	return dq_num;
 }
 
@@ -1193,6 +1283,43 @@ static struct rte_dma_dev_ops dpaa_qdma_ops = {
 	.stats_reset		  = dpaa_qdma_stats_reset,
 };
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	if (strcmp(value, "1"))
+		return -1;
+
+	return 0;
+}
+
+static int
+dpaa_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+
+	if (rte_kvargs_process(kvlist, key,
+			       check_devargs_handler, NULL) < 0) {
+		rte_kvargs_free(kvlist);
+		return 0;
+	}
+	rte_kvargs_free(kvlist);
+
+	return 1;
+}
+
 static int
 dpaa_qdma_init(struct rte_dma_dev *dmadev)
 {
@@ -1203,6 +1330,11 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev)
 	int ret;
 	uint32_t i, j, k;
 
+	if (dpaa_get_devargs(dmadev->device->devargs, DPAA_DMA_ERROR_CHECK)) {
+		s_hw_err_check = true;
+		DPAA_QDMA_INFO("Enable DMA error checks");
+	}
+
 	fsl_qdma->n_queues = QDMA_QUEUES * QDMA_BLOCKS;
 	fsl_qdma->num_blocks = QDMA_BLOCKS;
 	fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
@@ -1344,4 +1476,5 @@ static struct rte_dpaa_driver rte_dpaa_qdma_pmd = {
 };
 
 RTE_PMD_REGISTER_DPAA(dpaa_qdma, rte_dpaa_qdma_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(dpaa_qdma, DPAA_DMA_ERROR_CHECK "=<int>");
 RTE_LOG_REGISTER_DEFAULT(dpaa_qdma_logtype, INFO);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index 1e820d0207..91eaf1455a 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -199,6 +199,48 @@ struct fsl_qdma_cmpd_ft {
 	uint64_t phy_df;
 } __rte_packed;
 
+#define FSL_QDMA_ERR_REG_STATUS_OFFSET 0xe00
+
+struct fsl_qdma_dedr_reg {
+	uint32_t me:1;
+	uint32_t rsv0:1;
+	uint32_t rte:1;
+	uint32_t wte:1;
+	uint32_t cde:1;
+	uint32_t sde:1;
+	uint32_t dde:1;
+	uint32_t ere:1;
+	uint32_t rsv1:24;
+};
+
+struct fsl_qdma_deccqidr_reg {
+	uint32_t rsv:27;
+	uint32_t block:2;
+	uint32_t queue:3;
+};
+
+#define FSL_QDMA_DECCD_ERR_NUM \
+	(sizeof(struct fsl_qdma_comp_cmd_desc) / sizeof(uint32_t))
+
+struct fsl_qdma_err_reg {
+	uint32_t deier;
+	union {
+		rte_be32_t dedr_be;
+		struct fsl_qdma_dedr_reg dedr;
+	};
+	uint32_t rsv0[2];
+	union {
+		rte_le32_t deccd_le[FSL_QDMA_DECCD_ERR_NUM];
+		struct fsl_qdma_comp_cmd_desc err_cmd;
+	};
+	uint32_t rsv1[4];
+	union {
+		rte_be32_t deccqidr_be;
+		struct fsl_qdma_deccqidr_reg deccqidr;
+	};
+	rte_be32_t decbr;
+};
+
 #define DPAA_QDMA_IDXADDR_FROM_SG_FLAG(flag) \
 	((void *)(uintptr_t)((flag) - ((flag) & DPAA_QDMA_SG_IDX_ADDR_MASK)))
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 93b88acef8..408418f032 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2685,7 +2685,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
 		dpaa2_enable_err_queue = 1;
-		DPAA2_PMD_INFO("Enable error queue");
+		DPAA2_PMD_INFO("Enable DMA error checks");
 	}
 
 	/* Allocate memory for hardware structure for queues */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 165+ messages in thread

* [v7 15/15] bus/dpaa: add port bmi stats
  2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
                                     ` (13 preceding siblings ...)
  2024-10-15  7:14                   ` [v7 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
@ 2024-10-15  7:14                   ` Gagandeep Singh
  14 siblings, 0 replies; 165+ messages in thread
From: Gagandeep Singh @ 2024-10-15  7:14 UTC (permalink / raw)
  To: dev, Hemant Agrawal, Sachin Saxena

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add BMI statistics and fix the existing extended
statistics.
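
A minimal usage sketch (illustration only; port_id is a placeholder)
showing how the new BMI counters show up through the standard ethdev
xstats API:

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get(port_id, NULL, 0);

	if (n <= 0)
		return;

	struct rte_eth_xstat_name names[n];
	struct rte_eth_xstat vals[n];

	/* Names now include the BMI entries such as rx_frame_counter. */
	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, vals, n) != n)
		return;

	for (int i = 0; i < n; i++)
		printf("%s: %"PRIu64"\n", names[i].name, vals[i].value);
}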

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/bus/dpaa/base/fman/fman_hw.c | 65 +++++++++++++++++++++++++++-
 drivers/bus/dpaa/include/fman.h      |  4 +-
 drivers/bus/dpaa/include/fsl_fman.h  | 12 +++++
 drivers/bus/dpaa/version.map         |  4 ++
 drivers/net/dpaa/dpaa_ethdev.c       | 46 +++++++++++++++++---
 drivers/net/dpaa/dpaa_ethdev.h       | 12 +++++
 6 files changed, 134 insertions(+), 9 deletions(-)

diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 24a99f7235..27b39a4975 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -244,8 +244,8 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n)
 	uint64_t base_offset = offsetof(struct memac_regs, reoct_l);
 
 	for (i = 0; i < n; i++)
-		value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) |
-				(u64)in_be32((char *)regs + base_offset +
+		value[i] = ((u64)in_be32((char *)regs + base_offset + 8 * i) |
+				((u64)in_be32((char *)regs + base_offset +
 				8 * i + 4)) << 32);
 }
 
@@ -266,6 +266,67 @@ fman_if_stats_reset(struct fman_if *p)
 		;
 }
 
+void
+fman_if_bmi_stats_enable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp |= FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_disable(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	uint32_t tmp;
+
+	tmp = in_be32(&regs->fmbm_rstc);
+
+	tmp &= ~FMAN_BMI_COUNTERS_EN;
+
+	out_be32(&regs->fmbm_rstc, tmp);
+}
+
+void
+fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+	int i = 0;
+
+	value[i++] = (u32)in_be32(&regs->fmbm_rfrc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfbc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rlfc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rffc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfdc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rfldec);
+	value[i++] = (u32)in_be32(&regs->fmbm_rodc);
+	value[i++] = (u32)in_be32(&regs->fmbm_rbdc);
+}
+
+void
+fman_if_bmi_stats_reset(struct fman_if *p)
+{
+	struct __fman_if *m = container_of(p, struct __fman_if, __if);
+	struct rx_bmi_regs *regs = (struct rx_bmi_regs *)m->bmi_map;
+
+	out_be32(&regs->fmbm_rfrc, 0);
+	out_be32(&regs->fmbm_rfbc, 0);
+	out_be32(&regs->fmbm_rlfc, 0);
+	out_be32(&regs->fmbm_rffc, 0);
+	out_be32(&regs->fmbm_rfdc, 0);
+	out_be32(&regs->fmbm_rfldec, 0);
+	out_be32(&regs->fmbm_rodc, 0);
+	out_be32(&regs->fmbm_rbdc, 0);
+}
+
 void
 fman_if_promiscuous_enable(struct fman_if *p)
 {
diff --git a/drivers/bus/dpaa/include/fman.h b/drivers/bus/dpaa/include/fman.h
index f918836ec2..1f120b7614 100644
--- a/drivers/bus/dpaa/include/fman.h
+++ b/drivers/bus/dpaa/include/fman.h
@@ -56,6 +56,8 @@
 #define FMAN_PORT_BMI_FIFO_UNITS	0x100
 #define FMAN_PORT_IC_OFFSET_UNITS	0x10
 
+#define FMAN_BMI_COUNTERS_EN 0x80000000
+
 #define FMAN_ENABLE_BPOOL_DEPLETION	0xF00000F0
 
 #define HASH_CTRL_MCAST_EN	0x00000100
@@ -260,7 +262,7 @@ struct rx_bmi_regs {
 					/**< Buffer Manager pool Information-*/
 	uint32_t fmbm_acnt[FMAN_PORT_MAX_EXT_POOLS_NUM];
 					/**< Allocate Counter-*/
-	uint32_t reserved0130[8];
+	uint32_t reserved0120[16];
 					/**< 0x130/0x140 - 0x15F reserved -*/
 	uint32_t fmbm_rcgm[FMAN_PORT_CG_MAP_NUM];
 					/**< Congestion Group Map*/
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index 20690f8329..5a9750ad0c 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -60,6 +60,18 @@ void fman_if_stats_reset(struct fman_if *p);
 __rte_internal
 void fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n);
 
+__rte_internal
+void fman_if_bmi_stats_enable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_disable(struct fman_if *p);
+
+__rte_internal
+void fman_if_bmi_stats_get_all(struct fman_if *p, uint64_t *value);
+
+__rte_internal
+void fman_if_bmi_stats_reset(struct fman_if *p);
+
 /* Set ignore pause option for a specific interface */
 void fman_if_set_rx_ignore_pause_frames(struct fman_if *p, bool enable);
 
diff --git a/drivers/bus/dpaa/version.map b/drivers/bus/dpaa/version.map
index 3f547f75cf..a17d57632e 100644
--- a/drivers/bus/dpaa/version.map
+++ b/drivers/bus/dpaa/version.map
@@ -24,6 +24,10 @@ INTERNAL {
 	fman_dealloc_bufs_mask_hi;
 	fman_dealloc_bufs_mask_lo;
 	fman_if_add_mac_addr;
+	fman_if_bmi_stats_enable;
+	fman_if_bmi_stats_disable;
+	fman_if_bmi_stats_get_all;
+	fman_if_bmi_stats_reset;
 	fman_if_clear_mac_addr;
 	fman_if_disable_rx;
 	fman_if_discard_rx_errors;
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 51f5422e0c..da4a64d99a 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -131,6 +131,22 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 		offsetof(struct dpaa_if_stats, tvlan)},
 	{"rx_undersized",
 		offsetof(struct dpaa_if_stats, tund)},
+	{"rx_frame_counter",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfrc)},
+	{"rx_bad_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfbc)},
+	{"rx_large_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rlfc)},
+	{"rx_filter_frames_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rffc)},
+	{"rx_frame_discard_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfdc)},
+	{"rx_frame_list_dma_err_count",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rfldec)},
+	{"rx_out_of_buffer_discard",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rodc)},
+	{"rx_buf_deallocate",
+		offsetof(struct dpaa_if_rx_bmi_stats, fmbm_rbdc)},
 };
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
@@ -430,6 +446,7 @@ static void dpaa_interrupt_handler(void *param)
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
+	struct fman_if *fif = dev->process_private;
 	uint16_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -443,7 +460,9 @@ static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 	else
 		dev->tx_pkt_burst = dpaa_eth_queue_tx;
 
-	fman_if_enable_rx(dev->process_private);
+	fman_if_bmi_stats_enable(fif);
+	fman_if_bmi_stats_reset(fif);
+	fman_if_enable_rx(fif);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
@@ -461,8 +480,10 @@ static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	dev->data->dev_started = 0;
 
-	if (!fif->is_shared_mac)
+	if (!fif->is_shared_mac) {
+		fman_if_bmi_stats_disable(fif);
 		fman_if_disable_rx(fif);
+	}
 	dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++)
@@ -769,6 +790,7 @@ static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 
 	fman_if_stats_reset(dev->process_private);
+	fman_if_bmi_stats_reset(dev->process_private);
 
 	return 0;
 }
@@ -777,8 +799,9 @@ static int
 dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		    unsigned int n)
 {
-	unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i = 0, j, num = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (n < num)
 		return num;
@@ -789,10 +812,16 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	fman_if_stats_get_all(dev->process_private, values,
 			      sizeof(struct dpaa_if_stats) / 8);
 
-	for (i = 0; i < num; i++) {
+	for (i = 0; i < num - (bmi_count - 1); i++) {
 		xstats[i].id = i;
 		xstats[i].value = values[dpaa_xstats_strings[i].offset / 8];
 	}
+	fman_if_bmi_stats_get_all(dev->process_private, values);
+	for (j = 0; i < num; i++, j++) {
+		xstats[i].id = i;
+		xstats[i].value = values[j];
+	}
+
 	return i;
 }
 
@@ -819,8 +848,9 @@ static int
 dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		      uint64_t *values, unsigned int n)
 {
-	unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+	unsigned int i, j, stat_cnt = RTE_DIM(dpaa_xstats_strings);
 	uint64_t values_copy[sizeof(struct dpaa_if_stats) / 8];
+	unsigned int bmi_count = sizeof(struct dpaa_if_rx_bmi_stats) / 4;
 
 	if (!ids) {
 		if (n < stat_cnt)
@@ -832,10 +862,14 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 		fman_if_stats_get_all(dev->process_private, values_copy,
 				      sizeof(struct dpaa_if_stats) / 8);
 
-		for (i = 0; i < stat_cnt; i++)
+		for (i = 0; i < stat_cnt - (bmi_count - 1); i++)
 			values[i] =
 				values_copy[dpaa_xstats_strings[i].offset / 8];
 
+		fman_if_bmi_stats_get_all(dev->process_private, values_copy);
+		for (j = 0; i < stat_cnt; i++, j++)
+			values[i] = values_copy[j];
+
 		return stat_cnt;
 	}
 
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index 0006bd33d4..1278623e7b 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -212,6 +212,18 @@ dpaa_rx_cb_atomic(void *event,
 		  const struct qm_dqrr_entry *dqrr,
 		  void **bufs);
 
+struct dpaa_if_rx_bmi_stats {
+	uint32_t fmbm_rstc;		/**< Rx Statistics Counters*/
+	uint32_t fmbm_rfrc;		/**< Rx Frame Counter*/
+	uint32_t fmbm_rfbc;		/**< Rx Bad Frames Counter*/
+	uint32_t fmbm_rlfc;		/**< Rx Large Frames Counter*/
+	uint32_t fmbm_rffc;		/**< Rx Filter Frames Counter*/
+	uint32_t fmbm_rfdc;		/**< Rx Frame Discard Counter*/
+	uint32_t fmbm_rfldec;		/**< Rx Frames List DMA Error Counter*/
+	uint32_t fmbm_rodc;		/**< Rx Out of Buffers Discard Counter*/
+	uint32_t fmbm_rbdc;		/**< Rx Buffers Deallocate Counter*/
+};
+
 /* PMD related logs */
 extern int dpaa_logtype_pmd;
 #define RTE_LOGTYPE_DPAA_PMD dpaa_logtype_pmd
-- 
2.25.1


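As a usage aside (not part of this patch): a minimal sketch of how an application could read the new BMI counters once a dpaa port is started, through the generic ethdev xstats API; the helper name, port id handling and error handling here are illustrative assumptions, not code from the series.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Dump all extended statistics of a port. With this patch applied, the
 * BMI counters (rx_frame_counter, rx_bad_frames_count, ...) appear at
 * the end of the list exported by the dpaa PMD, after the MAC stats.
 */
static void
dump_xstats(uint16_t port_id)
{
	int i, n;
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *stats = NULL;

	/* First call with NULL returns the number of xstats. */
	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	stats = calloc(n, sizeof(*stats));
	if (names == NULL || stats == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, stats, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[stats[i].id].name, stats[i].value);
out:
	free(names);
	free(stats);
}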
^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v7 02/15] dma/dpaa2: refactor driver code
  2024-10-15  7:13                   ` [v7 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
@ 2024-10-15 22:11                     ` Stephen Hemminger
  2024-10-16  5:09                       ` Hemant Agrawal
  0 siblings, 1 reply; 165+ messages in thread
From: Stephen Hemminger @ 2024-10-15 22:11 UTC (permalink / raw)
  To: Gagandeep Singh; +Cc: dev, Hemant Agrawal, Anatoly Burakov, Jun Yang

On Tue, 15 Oct 2024 12:43:50 +0530
Gagandeep Singh <g.singh@nxp.com> wrote:

> From: Jun Yang <jun.yang@nxp.com>
> 
> refactor the driver code with changes in:
> - multiple HW queues
> - SMA single copy and SG copy
> - silent mode
> 
> Signed-off-by: Jun Yang <jun.yang@nxp.com>
> ---

To prevent false positives from checkpatch spell check, it would
be good to have a follow-on patch that does:

diff --git a/devtools/build-dict.sh b/devtools/build-dict.sh
index a8cac49029..d503fb52fc 100755
--- a/devtools/build-dict.sh
+++ b/devtools/build-dict.sh
@@ -17,6 +17,8 @@ sed '/^..->/d' |
 sed '/^uint->/d' |
 sed "/^doesn'->/d" |
 sed '/^wasn->/d' |
+sed '/^ser->/d' |
+sed '/^fle->/d' |
 
 # print to stdout
 cat

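As an illustration (assuming the word->correction entry format that build-dict.sh already filters, as in the doesn'-> and wasn-> lines above): the two added sed lines drop the fle-> and ser-> entries from the generated dictionary, so those driver identifiers stop being flagged. A rough sketch of the effect on a hypothetical dictionary excerpt:

# hypothetical dictionary excerpt piped through the proposed filters
printf 'fle->file\nser->set, server\nrecieve->receive\n' |
sed '/^ser->/d' |
sed '/^fle->/d'
# prints only: recieve->receive
# so "fle" and "ser" in the DPAA driver code no longer trigger the
# checkpatch spell check, while genuine misspellings are still caught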
^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v7 02/15] dma/dpaa2: refactor driver code
  2024-10-15 22:11                     ` Stephen Hemminger
@ 2024-10-16  5:09                       ` Hemant Agrawal
  2024-10-16  5:13                         ` Stephen Hemminger
  0 siblings, 1 reply; 165+ messages in thread
From: Hemant Agrawal @ 2024-10-16  5:09 UTC (permalink / raw)
  To: Stephen Hemminger, Gagandeep Singh
  Cc: dev, Hemant Agrawal, Anatoly Burakov, Jun Yang


On 16-10-2024 03:41, Stephen Hemminger wrote:
> On Tue, 15 Oct 2024 12:43:50 +0530
> Gagandeep Singh <g.singh@nxp.com> wrote:
>
>> From: Jun Yang <jun.yang@nxp.com>
>>
>> refactor the driver code with changes in:
>> - multiple HW queues
>> - SMA single copy and SG copy
>> - silent mode
>>
>> Signed-off-by: Jun Yang <jun.yang@nxp.com>
>> ---
> To prevent false positives from checkpatch spell check, it would
> be good to have a follow-on patch that does:
>
> diff --git a/devtools/build-dict.sh b/devtools/build-dict.sh
> index a8cac49029..d503fb52fc 100755
> --- a/devtools/build-dict.sh
> +++ b/devtools/build-dict.sh
> @@ -17,6 +17,8 @@ sed '/^..->/d' |
>   sed '/^uint->/d' |
>   sed "/^doesn'->/d" |
>   sed '/^wasn->/d' |
> +sed '/^ser->/d' |
> +sed '/^fle->/d' |
>   
>   # print to stdout
>   cat

Hi Stephen,

     It may not be a good idea to add these. For DPAA these are proper
keywords; however, for others *fle* can be a spelling mistake of *file*.

Regards,

Hemant


^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v7 02/15] dma/dpaa2: refactor driver code
  2024-10-16  5:09                       ` Hemant Agrawal
@ 2024-10-16  5:13                         ` Stephen Hemminger
  2024-10-16  5:15                           ` Hemant Agrawal
  0 siblings, 1 reply; 165+ messages in thread
From: Stephen Hemminger @ 2024-10-16  5:13 UTC (permalink / raw)
  To: Hemant Agrawal
  Cc: Gagandeep Singh, dev, Hemant Agrawal, Anatoly Burakov, Jun Yang

On Wed, 16 Oct 2024 10:39:40 +0530
Hemant Agrawal <hemant.agrawal@oss.nxp.com> wrote:

> >
> > diff --git a/devtools/build-dict.sh b/devtools/build-dict.sh
> > index a8cac49029..d503fb52fc 100755
> > --- a/devtools/build-dict.sh
> > +++ b/devtools/build-dict.sh
> > @@ -17,6 +17,8 @@ sed '/^..->/d' |
> >   sed '/^uint->/d' |
> >   sed "/^doesn'->/d" |
> >   sed '/^wasn->/d' |
> > +sed '/^ser->/d' |
> > +sed '/^fle->/d' |
> >   
> >   # print to stdout
> >   cat  
> 
> Hi Stephen,
> 
>      It may not be a good idea to add these. For DPAA these are proper
> keywords; however, for others *fle* can be a spelling mistake of *file*.
> 
> Regards,
> 
> Hemant


But every patch gets tagged with a checkpatch error now.
Or should we change the variable names?

^ permalink raw reply	[flat|nested] 165+ messages in thread

* Re: [v7 02/15] dma/dpaa2: refactor driver code
  2024-10-16  5:13                         ` Stephen Hemminger
@ 2024-10-16  5:15                           ` Hemant Agrawal
  0 siblings, 0 replies; 165+ messages in thread
From: Hemant Agrawal @ 2024-10-16  5:15 UTC (permalink / raw)
  To: Stephen Hemminger
  Cc: Gagandeep Singh, dev, Hemant Agrawal, Anatoly Burakov, Jun Yang


On 16-10-2024 10:43, Stephen Hemminger wrote:
> On Wed, 16 Oct 2024 10:39:40 +0530
> Hemant Agrawal <hemant.agrawal@oss.nxp.com> wrote:
>
>>> diff --git a/devtools/build-dict.sh b/devtools/build-dict.sh
>>> index a8cac49029..d503fb52fc 100755
>>> --- a/devtools/build-dict.sh
>>> +++ b/devtools/build-dict.sh
>>> @@ -17,6 +17,8 @@ sed '/^..->/d' |
>>>    sed '/^uint->/d' |
>>>    sed "/^doesn'->/d" |
>>>    sed '/^wasn->/d' |
>>> +sed '/^ser->/d' |
>>> +sed '/^fle->/d' |
>>>    
>>>    # print to stdout
>>>    cat
>> Hi Stephen,
>>
>>       It may not be a good idea to add these. For DPAA these are proper
>> keywords; however, for others *fle* can be a spelling mistake of *file*.
>>
>> Regards,
>>
>> Hemant
>
> But every patch gets tagged with a checkpatch error now.
> Or should we change the variable names?

Changing the variable names is not feasible, as they align with the base
driver and register names.

If you don't want to ignore the checkpatch errors, then we can go with
your suggestion.


^ permalink raw reply	[flat|nested] 165+ messages in thread

end of thread, other threads:[~2024-10-16  5:15 UTC | newest]

Thread overview: 165+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-07-19 10:00 [PATCH 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-07-19 10:00 ` [PATCH 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-19 10:00 ` [PATCH 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-19 10:01 ` [PATCH 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-19 10:01 ` [PATCH 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-19 10:01 ` [PATCH 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-19 10:01 ` [PATCH 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-19 10:01 ` [PATCH 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-19 10:01 ` [PATCH 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-19 10:01 ` [PATCH 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-19 10:01 ` [PATCH 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-19 10:01 ` [PATCH 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-19 10:01 ` [PATCH 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-19 10:01 ` [PATCH 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-19 10:01 ` [PATCH 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-19 10:01 ` [PATCH 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-19 10:01 ` [PATCH 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-19 10:01 ` [PATCH 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 23/30] dma/dpaa: block dequeue Gagandeep Singh
2024-07-19 10:01 ` [PATCH 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-19 10:01 ` [PATCH 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-19 10:01 ` [PATCH 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-19 10:01 ` [PATCH 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-19 10:01 ` [PATCH 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-19 10:01 ` [PATCH 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-19 10:01 ` [PATCH 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
2024-07-22 11:58 ` [v2 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-07-22 11:58   ` [v2 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-07-22 11:58   ` [v2 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-22 11:58   ` [v2 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-22 11:58   ` [v2 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-22 11:58   ` [v2 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-22 11:58   ` [v2 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-22 11:58   ` [v2 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-22 11:58   ` [v2 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-22 11:58   ` [v2 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-22 11:58   ` [v2 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-22 11:58   ` [v2 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-22 11:58   ` [v2 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-22 11:58   ` [v2 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-22 11:58   ` [v2 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-22 11:58   ` [v2 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-22 11:58   ` [v2 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-22 11:58   ` [v2 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-22 11:58   ` [v2 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-22 11:58   ` [v2 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-22 11:58   ` [v2 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-22 11:58   ` [v2 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-22 11:58   ` [v2 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-22 11:58   ` [v2 23/30] dma/dpaa: block dequeue Gagandeep Singh
2024-07-22 11:58   ` [v2 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-22 11:58   ` [v2 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-22 11:58   ` [v2 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-22 11:58   ` [v2 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-22 11:58   ` [v2 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-22 11:58   ` [v2 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-22 11:58   ` [v2 30/30] bus/dpaa: add port bmi stats Gagandeep Singh
2024-07-22 16:39   ` [v3 00/30] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-07-22 16:39     ` [v3 01/30] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-10-08  7:22       ` [v4 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-10-08  7:22         ` [v4 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-10-08 10:57           ` [v5 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-10-08 10:57             ` [v5 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-10-14  9:36               ` [v6 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-10-14  9:36                 ` [v6 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-10-14  9:36                 ` [v6 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
2024-10-14  9:36                 ` [v6 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-10-14  9:36                 ` [v6 04/15] dma/dpaa2: add short FD support Gagandeep Singh
2024-10-14  9:36                 ` [v6 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-10-14  9:36                 ` [v6 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-10-14  9:36                 ` [v6 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-10-14  9:36                 ` [v6 08/15] dma/dpaa: refactor driver Gagandeep Singh
2024-10-15  2:59                   ` Stephen Hemminger
2024-10-14  9:36                 ` [v6 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
2024-10-14  9:36                 ` [v6 10/15] dma/dpaa: add silent mode support Gagandeep Singh
2024-10-14  9:36                 ` [v6 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-10-14  9:36                 ` [v6 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-10-14  9:36                 ` [v6 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-10-14  9:36                 ` [v6 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
2024-10-14  9:36                 ` [v6 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
2024-10-15  7:13                 ` [v7 00/15] NXP DMA driver fixes and Enhancements Gagandeep Singh
2024-10-15  7:13                   ` [v7 01/15] dma/dpaa2: configure route by port by PCIe port param Gagandeep Singh
2024-10-15  7:13                   ` [v7 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
2024-10-15 22:11                     ` Stephen Hemminger
2024-10-16  5:09                       ` Hemant Agrawal
2024-10-16  5:13                         ` Stephen Hemminger
2024-10-16  5:15                           ` Hemant Agrawal
2024-10-15  7:13                   ` [v7 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-10-15  7:13                   ` [v7 04/15] dma/dpaa2: add short FD support Gagandeep Singh
2024-10-15  7:13                   ` [v7 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-10-15  7:13                   ` [v7 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-10-15  7:13                   ` [v7 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-10-15  7:13                   ` [v7 08/15] dma/dpaa: refactor driver Gagandeep Singh
2024-10-15  7:13                   ` [v7 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
2024-10-15  7:13                   ` [v7 10/15] dma/dpaa: add silent mode support Gagandeep Singh
2024-10-15  7:13                   ` [v7 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-10-15  7:14                   ` [v7 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-10-15  7:14                   ` [v7 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-10-15  7:14                   ` [v7 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
2024-10-15  7:14                   ` [v7 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
2024-10-08 10:57             ` [v5 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
2024-10-08 10:57             ` [v5 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-10-08 10:57             ` [v5 04/15] dma/dpaa2: add short FD support Gagandeep Singh
2024-10-08 10:58             ` [v5 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-10-08 10:58             ` [v5 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-10-08 10:58             ` [v5 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-10-08 10:58             ` [v5 08/15] dma/dpaa: refactor driver Gagandeep Singh
2024-10-08 10:58             ` [v5 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
2024-10-08 10:58             ` [v5 10/15] dma/dpaa: add silent mode support Gagandeep Singh
2024-10-08 10:58             ` [v5 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-10-08 10:58             ` [v5 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-10-08 10:58             ` [v5 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-10-08 10:58             ` [v5 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
2024-10-08 10:58             ` [v5 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
2024-10-09 18:02               ` Stephen Hemminger
2024-10-08  7:22         ` [v4 02/15] dma/dpaa2: refactor driver code Gagandeep Singh
2024-10-08  7:22         ` [v4 03/15] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-10-08  7:22         ` [v4 04/15] dma/dpaa2: add short FD support Gagandeep Singh
2024-10-08  7:22         ` [v4 05/15] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-10-08  7:22         ` [v4 06/15] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-10-08  7:22         ` [v4 07/15] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-10-08  7:22         ` [v4 08/15] dma/dpaa: refactor driver Gagandeep Singh
2024-10-08  7:23         ` [v4 09/15] dma/dpaa: support burst capacity API Gagandeep Singh
2024-10-08  7:23         ` [v4 10/15] dma/dpaa: add silent mode support Gagandeep Singh
2024-10-08  7:23         ` [v4 11/15] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-10-08  7:23         ` [v4 12/15] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-10-08  7:23         ` [v4 13/15] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-10-08  7:23         ` [v4 14/15] dma/dpaa: add DMA error checks Gagandeep Singh
2024-10-08  7:23         ` [v4 15/15] bus/dpaa: add port bmi stats Gagandeep Singh
2024-07-22 16:39     ` [v3 02/30] dma/dpaa2: support multiple HW queues Gagandeep Singh
2024-07-22 20:19       ` Stephen Hemminger
2024-10-07 20:51       ` Stephen Hemminger
2024-07-22 16:39     ` [v3 03/30] dma/dpaa2: adapt DMA driver API Gagandeep Singh
2024-07-22 16:39     ` [v3 04/30] dma/dpaa2: multiple process support Gagandeep Singh
2024-07-22 16:39     ` [v3 05/30] dma/dpaa2: add sanity check for SG entry Gagandeep Singh
2024-07-22 20:21       ` Stephen Hemminger
2024-07-22 16:39     ` [v3 06/30] dma/dpaa2: include DPAA2 specific header files Gagandeep Singh
2024-07-22 16:39     ` [v3 07/30] dma/dpaa2: borrow flags of DMA operation to pass job context Gagandeep Singh
2024-07-22 16:39     ` [v3 08/30] bus/fslmc: enhance the qbman dq storage logic Gagandeep Singh
2024-07-22 16:39     ` [v3 09/30] dma/dpaa2: add short FD support Gagandeep Singh
2024-07-22 16:39     ` [v3 10/30] dma/dpaa2: limit the max descriptor number Gagandeep Singh
2024-07-22 16:39     ` [v3 11/30] dma/dpaa2: change the DMA copy return value Gagandeep Singh
2024-07-22 16:39     ` [v3 12/30] dma/dpaa2: move the qdma header to common place Gagandeep Singh
2024-07-22 16:39     ` [v3 13/30] dma/dpaa: support multi channels Gagandeep Singh
2024-07-22 16:39     ` [v3 14/30] dma/dpaa: fix job enqueue Gagandeep Singh
2024-07-22 16:39     ` [v3 15/30] dma/dpaa: add burst capacity API Gagandeep Singh
2024-07-22 16:39     ` [v3 16/30] dma/dpaa: add workaround for ERR050757 Gagandeep Singh
2024-07-22 16:39     ` [v3 17/30] dma/dpaa: qdma stall workaround for ERR050265 Gagandeep Singh
2024-07-22 16:39     ` [v3 18/30] dma/dpaa: remove unwanted desc Gagandeep Singh
2024-07-22 16:39     ` [v3 19/30] dma/dpaa: data path optimization Gagandeep Singh
2024-07-22 16:39     ` [v3 20/30] dma/dpaa: refactor driver Gagandeep Singh
2024-07-22 16:39     ` [v3 21/30] dma/dpaa: dequeue status queue Gagandeep Singh
2024-07-22 16:39     ` [v3 22/30] dma/dpaa: add Scatter Gather support Gagandeep Singh
2024-07-22 16:39     ` [v3 23/30] dma/dpaa: block dequeue Gagandeep Singh
2024-07-22 16:39     ` [v3 24/30] dma/dpaa: improve congestion handling Gagandeep Singh
2024-07-22 16:39     ` [v3 25/30] dma/dpaa: disable SG descriptor as default Gagandeep Singh
2024-07-22 16:39     ` [v3 26/30] dma/dpaa: improve ERRATA workaround solution Gagandeep Singh
2024-07-22 16:39     ` [v3 27/30] dma/dpaa: improve silent mode support Gagandeep Singh
2024-07-22 16:39     ` [v3 28/30] dma/dpaa: support multiple SG copies Gagandeep Singh
2024-07-22 16:39     ` [v3 29/30] dma/dpaa: support max SG entry size Gagandeep Singh
2024-07-22 16:39     ` [v3 30/30] bus/dpaa: add port bmi stats Gagandeep Singh

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).