DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA
@ 2019-11-06 14:43 Nipun Gupta
  2019-11-06 14:43 ` [dpdk-dev] [PATCH 2/2] event/dpaa2: support ordered queue case type Nipun Gupta
  2019-11-07  9:57 ` [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA Hemant Agrawal
  0 siblings, 2 replies; 4+ messages in thread
From: Nipun Gupta @ 2019-11-06 14:43 UTC (permalink / raw)
  To: dev; +Cc: thomas, hemant.agrawal, Minghuan Lian, Sachin Saxena, Nipun Gupta

RBP, or route by port, helps in translating the DMA address
over PCIe. Add RBP support with both the long and the
ultra-short FD formats.
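
For context, a minimal usage sketch (not part of the patch) of the RBP
interface this change introduces. The PCIe port/PF/VF numbers are
placeholders, and the third argument of rte_qdma_vq_create_rbp() is
assumed from the diff below:

#include <stdint.h>
#include <string.h>
#include <rte_pmd_dpaa2_qdma.h>

/* Sketch: create a virtual queue that reads from local memory and writes
 * to PCIe memory using route by port (RBP) on the destination side.
 * All identifiers below are placeholder values, not real hardware IDs.
 */
static int
setup_rbp_vq(uint32_t lcore_id)
{
	struct rte_qdma_rbp rbp;

	memset(&rbp, 0, sizeof(rbp));
	rbp.enable = 1;
	rbp.srbp = 0;		/* source: local DDR, no route by port */
	rbp.drbp = 1;		/* destination: PCIe, route by port */
	rbp.dportid = 3;	/* placeholder PCIe port id */
	rbp.dpfid = 0;
	rbp.dvfid = 0;

	/* assumed to return the virtual queue id, or a negative value on error */
	return rte_qdma_vq_create_rbp(lcore_id, 0, &rbp);
}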

Signed-off-by: Minghuan Lian <Minghuan.Lian@nxp.com>
Signed-off-by: Sachin Saxena <sachin.saxena@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 .../bus/fslmc/qbman/include/fsl_qbman_base.h  |  84 ++++
 drivers/raw/dpaa2_qdma/dpaa2_qdma.c           | 399 ++++++++++++------
 drivers/raw/dpaa2_qdma/dpaa2_qdma.h           |  17 +-
 drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h   |   9 +
 4 files changed, 364 insertions(+), 145 deletions(-)

diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 48bdaafa4..9323db370 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -135,6 +135,90 @@ struct qbman_fd {
 			uint32_t flc_lo;
 			uint32_t flc_hi;
 		} simple;
+
+		struct qbman_fd_us_pci_simple {
+			uint32_t saddr_lo;
+			uint32_t saddr_hi;
+
+			uint32_t len_sl:18;
+			uint32_t rsv1:14;
+
+			uint32_t sportid:4;
+			uint32_t rsv2:22;
+			uint32_t bmt:1;
+			uint32_t rsv3:1;
+			uint32_t fmt:2;
+			uint32_t sl:1;
+			uint32_t rsv4:1;
+
+			uint32_t acc_err:4;
+			uint32_t rsv5:4;
+			uint32_t ser:1;
+			uint32_t rsv6:3;
+			uint32_t wrttype:4;
+			uint32_t dqos:3;
+			uint32_t drbp:1;
+			uint32_t dlwc:2;
+			uint32_t rsv7:2;
+			uint32_t rdttype:4;
+			uint32_t sqos:3;
+			uint32_t srbp:1;
+
+			uint32_t error:8;
+			uint32_t dportid:4;
+			uint32_t rsv8:5;
+			uint32_t dca:1;
+			uint32_t dat:2;
+			uint32_t dattr:3;
+			uint32_t dvfa:1;
+			uint32_t dtc:3;
+			uint32_t so:1;
+			uint32_t dd:4;
+
+			uint32_t daddr_lo;
+			uint32_t daddr_hi;
+		} simple_pci;
+		struct qbman_fd_us_ddr_simple {
+			uint32_t saddr_lo;
+
+			uint32_t saddr_hi:17;
+			uint32_t rsv1:15;
+
+			uint32_t len;
+
+			uint32_t rsv2:15;
+			uint32_t bmt:1;
+			uint32_t rsv3:12;
+			uint32_t fmt:2;
+			uint32_t sl:1;
+			uint32_t rsv4:1;
+
+			uint32_t acc_err:4;
+			uint32_t rsv5:4;
+			uint32_t ser:1;
+			uint32_t rsv6:2;
+			uint32_t wns:1;
+			uint32_t wrttype:4;
+			uint32_t dqos:3;
+			uint32_t rsv12:1;
+			uint32_t dlwc:2;
+			uint32_t rsv7:1;
+			uint32_t rns:1;
+			uint32_t rdttype:4;
+			uint32_t sqos:3;
+			uint32_t rsv11:1;
+
+			uint32_t error:8;
+			uint32_t rsv8:6;
+			uint32_t va:1;
+			uint32_t rsv9:13;
+			uint32_t dd:4;
+
+			uint32_t daddr_lo;
+
+			uint32_t daddr_hi:17;
+			uint32_t rsv10:15;
+		} simple_ddr;
 	};
 };
 
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
index af678273d..c90595400 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -54,6 +54,267 @@ typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
 
 dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
 
+typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
+					struct rte_qdma_job **job);
+typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
+				  struct rte_qdma_job *job,
+				  struct rte_qdma_rbp *rbp,
+				  uint16_t vq_id);
+dpdmai_dev_get_job_t *dpdmai_dev_get_job;
+dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;
+
+static inline int
+qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
+			uint32_t len, struct qbman_fd *fd,
+			struct rte_qdma_rbp *rbp)
+{
+	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
+	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
+
+	fd->simple_pci.len_sl = len;
+
+	fd->simple_pci.bmt = 1;
+	fd->simple_pci.fmt = 3;
+	fd->simple_pci.sl = 1;
+	fd->simple_pci.ser = 1;
+
+	fd->simple_pci.sportid = rbp->sportid;	/*pcie 3 */
+	fd->simple_pci.srbp = rbp->srbp;
+	if (rbp->srbp)
+		fd->simple_pci.rdttype = 0;
+	else
+		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
+
+	/*dest is pcie memory */
+	fd->simple_pci.dportid = rbp->dportid;	/*pcie 3 */
+	fd->simple_pci.drbp = rbp->drbp;
+	if (rbp->drbp)
+		fd->simple_pci.wrttype = 0;
+	else
+		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
+	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
+
+	return 0;
+}
+
+static inline int
+qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
+			uint32_t len, struct qbman_fd *fd)
+{
+	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
+	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
+
+	fd->simple_ddr.len = len;
+
+	fd->simple_ddr.bmt = 1;
+	fd->simple_ddr.fmt = 3;
+	fd->simple_ddr.sl = 1;
+	fd->simple_ddr.ser = 1;
+	/**
+	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
+	 * Coherent copy of cacheable memory,
+	 * lookup in downstream cache, no allocate
+	 * on miss
+	 */
+	fd->simple_ddr.rns = 0;
+	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
+	/**
+	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
+	 * Coherent write of cacheable memory,
+	 * lookup in downstream cache, no allocate on miss
+	 */
+	fd->simple_ddr.wns = 0;
+	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
+
+	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
+	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
+
+	return 0;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+			struct rte_qdma_rbp *rbp,
+			uint64_t src, uint64_t dest,
+			size_t len, uint32_t flags)
+{
+	struct qdma_sdd *sdd;
+
+	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+	/* first frame list to source descriptor */
+	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+	/* source and destination descriptor */
+	if (rbp && rbp->enable) {
+		/* source */
+		sdd->read_cmd.portid = rbp->sportid;
+		sdd->rbpcmd_simple.pfid = rbp->spfid;
+		sdd->rbpcmd_simple.vfid = rbp->svfid;
+
+		if (rbp->srbp) {
+			sdd->read_cmd.rbp = rbp->srbp;
+			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
+		} else {
+			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+		}
+		sdd++;
+		/* destination */
+		sdd->write_cmd.portid = rbp->dportid;
+		sdd->rbpcmd_simple.pfid = rbp->dpfid;
+		sdd->rbpcmd_simple.vfid = rbp->dvfid;
+
+		if (rbp->drbp) {
+			sdd->write_cmd.rbp = rbp->drbp;
+			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
+		} else {
+			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+		}
+
+	} else {
+		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
+		sdd++;
+		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
+	}
+	fle++;
+	/* source frame list to source buffer */
+	if (flags & RTE_QDMA_JOB_SRC_PHY) {
+		DPAA2_SET_FLE_ADDR(fle, src);
+		DPAA2_SET_FLE_BMT(fle);
+	} else {
+		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+	}
+	DPAA2_SET_FLE_LEN(fle, len);
+
+	fle++;
+	/* destination frame list to destination buffer */
+	if (flags & RTE_QDMA_JOB_DEST_PHY) {
+		DPAA2_SET_FLE_BMT(fle);
+		DPAA2_SET_FLE_ADDR(fle, dest);
+	} else {
+		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+	}
+	DPAA2_SET_FLE_LEN(fle, len);
+
+	/* Final bit: 1, for last frame list */
+	DPAA2_SET_FLE_FIN(fle);
+}
+
+static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
+					struct rte_qdma_job *job,
+					struct rte_qdma_rbp *rbp,
+					uint16_t vq_id)
+{
+	struct rte_qdma_job **ppjob;
+	size_t iova;
+	int ret = 0;
+
+	if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
+		iova = (size_t)job->dest;
+	else
+		iova = (size_t)job->src;
+
+	/* Set the metadata */
+	job->vq_id = vq_id;
+	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
+	*ppjob = job;
+
+	if ((rbp->drbp == 1) || (rbp->srbp == 1))
+		ret = qdma_populate_fd_pci((phys_addr_t) job->src,
+					   (phys_addr_t) job->dest,
+					   job->len, fd, rbp);
+	else
+		ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
+					   (phys_addr_t) job->dest,
+					   job->len, fd);
+	return ret;
+}
+static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
+					struct rte_qdma_job *job,
+					struct rte_qdma_rbp *rbp,
+					uint16_t vq_id)
+{
+	struct rte_qdma_job **ppjob;
+	struct qbman_fle *fle;
+	int ret = 0;
+	/*
+	 * Get an FLE/SDD from FLE pool.
+	 * Note: IO metadata is before the FLE and SDD memory.
+	 */
+	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
+	if (ret) {
+		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
+		return ret;
+	}
+
+	/* Set the metadata */
+	job->vq_id = vq_id;
+	*ppjob = job;
+
+	fle = (struct qbman_fle *)(ppjob + 1);
+
+	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+	DPAA2_SET_FD_COMPOUND_FMT(fd);
+	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
+
+	/* Populate FLE */
+	memset(fle, 0, QDMA_FLE_POOL_SIZE);
+	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
+				job->len, job->flags);
+
+	return 0;
+}
+
+static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
+					struct rte_qdma_job **job)
+{
+	uint16_t vqid;
+	size_t iova;
+	struct rte_qdma_job **ppjob;
+
+	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
+		iova = (size_t) (((uint64_t)fd->simple_pci.daddr_hi) << 32
+				| (uint64_t)fd->simple_pci.daddr_lo);
+	else
+		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
+				| (uint64_t)fd->simple_pci.saddr_lo);
+
+	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
+	*job = (struct rte_qdma_job *)*ppjob;
+	(*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);
+	vqid = (*job)->vq_id;
+
+	return vqid;
+}
+
+static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
+					struct rte_qdma_job **job)
+{
+	struct rte_qdma_job **ppjob;
+	uint16_t vqid;
+	/*
+	 * Fetch metadata from FLE. job and vq_id were set
+	 * in metadata in the enqueue operation.
+	 */
+	ppjob = (struct rte_qdma_job **)
+			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	ppjob -= 1;
+
+	*job = (struct rte_qdma_job *)*ppjob;
+	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
+			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
+	vqid = (*job)->vq_id;
+
+	/* Free FLE to the pool */
+	rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);
+
+	return vqid;
+}
+
 static struct qdma_hw_queue *
 alloc_hw_queue(uint32_t lcore_id)
 {
@@ -291,6 +552,13 @@ rte_qdma_configure(struct rte_qdma_config *qdma_config)
 	}
 	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
 
+	if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
+		dpdmai_dev_get_job = dpdmai_dev_get_job_us;
+		dpdmai_dev_set_fd = dpdmai_dev_set_fd_us;
+	} else {
+		dpdmai_dev_get_job = dpdmai_dev_get_job_lf;
+		dpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;
+	}
 	return 0;
 }
 
@@ -379,112 +647,6 @@ rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
 	return i;
 }
 
-static void
-dpaa2_qdma_populate_fle(struct qbman_fle *fle,
-			struct rte_qdma_rbp *rbp,
-			uint64_t src, uint64_t dest,
-			size_t len, uint32_t flags)
-{
-	struct qdma_sdd *sdd;
-
-	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
-		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
-
-	/* first frame list to source descriptor */
-	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
-	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
-
-	/* source and destination descriptor */
-	if (rbp && rbp->enable) {
-		/* source */
-		sdd->read_cmd.portid = rbp->sportid;
-		sdd->rbpcmd_simple.pfid = rbp->spfid;
-		sdd->rbpcmd_simple.vfid = rbp->svfid;
-
-		if (rbp->srbp) {
-			sdd->read_cmd.rbp = rbp->srbp;
-			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
-		} else {
-			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		}
-		sdd++;
-		/* destination */
-		sdd->write_cmd.portid = rbp->dportid;
-		sdd->rbpcmd_simple.pfid = rbp->dpfid;
-		sdd->rbpcmd_simple.vfid = rbp->dvfid;
-
-		if (rbp->drbp) {
-			sdd->write_cmd.rbp = rbp->drbp;
-			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
-		} else {
-			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
-		}
-
-	} else {
-		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
-		sdd++;
-		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
-	}
-	fle++;
-	/* source frame list to source buffer */
-	if (flags & RTE_QDMA_JOB_SRC_PHY) {
-		DPAA2_SET_FLE_ADDR(fle, src);
-		DPAA2_SET_FLE_BMT(fle);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
-	}
-	DPAA2_SET_FLE_LEN(fle, len);
-
-	fle++;
-	/* destination frame list to destination buffer */
-	if (flags & RTE_QDMA_JOB_DEST_PHY) {
-		DPAA2_SET_FLE_BMT(fle);
-		DPAA2_SET_FLE_ADDR(fle, dest);
-	} else {
-		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
-	}
-	DPAA2_SET_FLE_LEN(fle, len);
-
-	/* Final bit: 1, for last frame list */
-	DPAA2_SET_FLE_FIN(fle);
-}
-
-static inline uint16_t dpdmai_dev_set_fd(struct qbman_fd *fd,
-					struct rte_qdma_job *job,
-					struct rte_qdma_rbp *rbp,
-					uint16_t vq_id)
-{
-	struct qdma_io_meta *io_meta;
-	struct qbman_fle *fle;
-	int ret = 0;
-	/*
-	 * Get an FLE/SDD from FLE pool.
-	 * Note: IO metadata is before the FLE and SDD memory.
-	 */
-	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
-	if (ret) {
-		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
-		return ret;
-	}
-
-	/* Set the metadata */
-	io_meta->cnxt = (size_t)job;
-	io_meta->id = vq_id;
-
-	fle = (struct qbman_fle *)(io_meta + 1);
-
-	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
-	DPAA2_SET_FD_COMPOUND_FMT(fd);
-	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
-
-	/* Populate FLE */
-	memset(fle, 0, QDMA_FLE_POOL_SIZE);
-	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
-				job->len, job->flags);
-
-	return 0;
-}
-
 static int
 dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
 			uint16_t txq_id,
@@ -602,31 +764,6 @@ rte_qdma_vq_enqueue(uint16_t vq_id,
 	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
 }
 
-static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
-					struct rte_qdma_job **job)
-{
-	struct qbman_fle *fle;
-	struct qdma_io_meta *io_meta;
-	uint16_t vqid;
-	/*
-	 * Fetch metadata from FLE. job and vq_id were set
-	 * in metadata in the enqueue operation.
-	 */
-	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
-	io_meta = (struct qdma_io_meta *)(fle) - 1;
-
-	*job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
-	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
-			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
-
-	vqid = io_meta->id;
-
-	/* Free FLE to the pool */
-	rte_mempool_put(qdma_dev.fle_pool, io_meta);
-
-	return vqid;
-}
-
 /* Function to receive a QDMA job for a given device and queue*/
 static int
 dpdmai_dev_dequeue_multijob_prefetch(
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
index f15dda694..1bce1a4d6 100644
--- a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -1,12 +1,12 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 NXP
+ * Copyright 2018-2019 NXP
  */
 
 #ifndef __DPAA2_QDMA_H__
 #define __DPAA2_QDMA_H__
 
 struct qdma_sdd;
-struct qdma_io_meta;
+struct rte_qdma_job;
 
 #define DPAA2_QDMA_MAX_FLE 3
 #define DPAA2_QDMA_MAX_SDD 2
@@ -14,7 +14,7 @@ struct qdma_io_meta;
 #define DPAA2_DPDMAI_MAX_QUEUES	8
 
 /** FLE pool size: 3 Frame list + 2 source/destination descriptor */
-#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
+#define QDMA_FLE_POOL_SIZE (sizeof(struct rte_qdma_job *) + \
 		sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
 		sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
 /** FLE pool cache size */
@@ -108,17 +108,6 @@ struct qdma_per_core_info {
 	uint16_t num_hw_queues;
 };
 
-/** Metadata which is stored with each operation */
-struct qdma_io_meta {
-	/**
-	 * Context which is stored in the FLE pool (just before the FLE).
-	 * QDMA job is stored as a this context as a part of metadata.
-	 */
-	uint64_t cnxt;
-	/** VQ ID is stored as a part of metadata of the enqueue command */
-	uint64_t id;
-};
-
 /** Source/Destination Descriptor */
 struct qdma_sdd {
 	uint32_t rsv;
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
index a1f905035..4e1268cc5 100644
--- a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -33,6 +33,12 @@ enum {
 	RTE_QDMA_MODE_VIRTUAL
 };
 
+/** Determines the format of FD */
+enum {
+	RTE_QDMA_LONG_FORMAT,
+	RTE_QDMA_ULTRASHORT_FORMAT,
+};
+
 /**
  * If user has configured a Virtual Queue mode, but for some particular VQ
  * user needs an exclusive H/W queue associated (for better performance
@@ -62,6 +68,8 @@ struct rte_qdma_config {
 	uint16_t max_vqs;
 	/** mode of operation - physical(h/w) or virtual */
 	uint8_t mode;
+	/** FD format */
+	uint8_t format;
 	/**
 	 * User provides this as input to the driver as a size of the FLE pool.
 	 * FLE's (and corresponding source/destination descriptors) are
@@ -143,6 +151,7 @@ struct rte_qdma_job {
 	 * lower 8bits fd error
 	 */
 	uint16_t status;
+	uint16_t vq_id;
 };
 
 /**
-- 
2.17.1
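
The patch also lets an application choose between the long and the new
ultra-short FD formats through rte_qdma_config. A minimal sketch, using
only the fields visible in the diff above; the max_vqs and fle_pool_count
values are placeholders:

#include <rte_pmd_dpaa2_qdma.h>

/* Sketch: configure the QDMA driver to use the ultra-short FD format. */
static int
configure_qdma_ultrashort(void)
{
	struct rte_qdma_config cfg = {
		.max_vqs = 16,				/* placeholder */
		.mode = RTE_QDMA_MODE_VIRTUAL,
		.format = RTE_QDMA_ULTRASHORT_FORMAT,	/* new in this patch */
		.fle_pool_count = 4096,			/* placeholder */
	};

	return rte_qdma_configure(&cfg);
}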



* [dpdk-dev] [PATCH 2/2] event/dpaa2: support ordered queue case type
  2019-11-06 14:43 [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA Nipun Gupta
@ 2019-11-06 14:43 ` Nipun Gupta
  2019-11-07  9:57 ` [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA Hemant Agrawal
  1 sibling, 0 replies; 4+ messages in thread
From: Nipun Gupta @ 2019-11-06 14:43 UTC (permalink / raw)
  To: dev; +Cc: thomas, hemant.agrawal, Nipun Gupta

Ordered queues are supported by DPAA2 hardware. Enable this schedule
type in the eventdev queue setup.
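
For reference, a minimal sketch (not part of the patch) of how an
application requests an ordered queue through the standard eventdev API;
the device/queue ids and the sequence depth are placeholder values:

#include <stdint.h>
#include <rte_eventdev.h>

/* Sketch: set up one ordered queue on an already-configured event device. */
static int
setup_ordered_queue(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf qconf = {
		.nb_atomic_order_sequences = 512,	/* placeholder depth */
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	return rte_event_queue_setup(dev_id, queue_id, &qconf);
}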

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/event/dpaa2/dpaa2_eventdev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index 4ee2c460e..d71361666 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -477,7 +477,6 @@ dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 
 	RTE_SET_USED(dev);
 	RTE_SET_USED(queue_id);
-	RTE_SET_USED(queue_conf);
 
 	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
@@ -496,8 +495,9 @@ dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	switch (queue_conf->schedule_type) {
 	case RTE_SCHED_TYPE_PARALLEL:
 	case RTE_SCHED_TYPE_ATOMIC:
-		break;
 	case RTE_SCHED_TYPE_ORDERED:
+		break;
+	default:
 		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
 		return -1;
 	}
-- 
2.17.1



* Re: [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA
  2019-11-06 14:43 [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA Nipun Gupta
  2019-11-06 14:43 ` [dpdk-dev] [PATCH 2/2] event/dpaa2: support ordered queue case type Nipun Gupta
@ 2019-11-07  9:57 ` Hemant Agrawal
  2019-11-08 15:00   ` Thomas Monjalon
  1 sibling, 1 reply; 4+ messages in thread
From: Hemant Agrawal @ 2019-11-07  9:57 UTC (permalink / raw)
  To: Nipun Gupta, dev; +Cc: thomas, M.h. Lian, Sachin Saxena, Nipun Gupta

Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>


* Re: [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA
  2019-11-07  9:57 ` [dpdk-dev] [PATCH 1/2] raw/dpaa2_qdma: add support for route by port in DMA Hemant Agrawal
@ 2019-11-08 15:00   ` Thomas Monjalon
  0 siblings, 0 replies; 4+ messages in thread
From: Thomas Monjalon @ 2019-11-08 15:00 UTC (permalink / raw)
  To: Nipun Gupta, M.h. Lian; +Cc: dev, Hemant Agrawal, Sachin Saxena

07/11/2019 10:57, Hemant Agrawal:
> Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Applied, thanks


