From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org
Cc: nipun.gupta@nxp.com, thomas@monjalon.net,
Gagandeep Singh <g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH v3 2/7] dma/dpaa: add device probe and remove functionality
Date: Mon, 8 Nov 2021 14:36:59 +0530
Message-ID: <20211108090704.3585175-3-g.singh@nxp.com>
In-Reply-To: <20211108090704.3585175-1-g.singh@nxp.com>
This patch adds device initialization functionality.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
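Note: once probe completes, the device is registered with the dmadev
framework and marked RTE_DMA_DEV_READY, so applications can discover
it through the generic API. A minimal sketch of that discovery, not
part of this patch (the upper bound of 64 device ids is an arbitrary
assumption for the example):

#include <stdio.h>
#include <rte_dmadev.h>

static void
list_dma_devices(void)
{
	struct rte_dma_info info;
	int16_t dev_id;

	printf("%u dmadev(s) available\n", rte_dma_count_avail());
	/* Walk candidate device ids and query the valid ones. */
	for (dev_id = 0; dev_id < 64; dev_id++) {
		if (!rte_dma_is_valid(dev_id))
			continue;
		if (rte_dma_info_get(dev_id, &info) == 0)
			printf("dmadev %d: %s\n", dev_id, info.dev_name);
	}
}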
drivers/dma/dpaa/dpaa_qdma.c | 456 ++++++++++++++++++++++++++++++++++-
drivers/dma/dpaa/dpaa_qdma.h | 236 ++++++++++++++++++
2 files changed, 690 insertions(+), 2 deletions(-)
create mode 100644 drivers/dma/dpaa/dpaa_qdma.h
diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 2ef3ee0c35..f958f78af5 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -3,17 +3,469 @@
*/
#include <rte_dpaa_bus.h>
+#include <rte_dmadev_pmd.h>
+
+#include "dpaa_qdma.h"
+
+static inline int
+ilog2(int x)
+{
+ int log = 0;
+
+ x >>= 1;
+
+ while (x) {
+ log++;
+ x >>= 1;
+ }
+ return log;
+}
+
+static u32
+qdma_readl(void *addr)
+{
+ return QDMA_IN(addr);
+}
+
+static void
+qdma_writel(u32 val, void *addr)
+{
+ QDMA_OUT(addr, val);
+}
+
+static void *
+dma_pool_alloc(int size, int aligned, dma_addr_t *phy_addr)
+{
+ void *virt_addr;
+
+ virt_addr = rte_malloc("dma pool alloc", size, aligned);
+ if (!virt_addr)
+ return NULL;
+
+ *phy_addr = rte_mem_virt2iova(virt_addr);
+
+ return virt_addr;
+}
+
+static void
+dma_pool_free(void *addr)
+{
+ rte_free(addr);
+}
+
+static void
+fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
+{
+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+ struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
+ int id;
+
+ if (--fsl_queue->count)
+ goto finally;
+
+ id = (fsl_qdma->block_base - fsl_queue->block_base) /
+ fsl_qdma->block_offset;
+
+ while (rte_atomic32_read(&wait_task[id]) == 1)
+ rte_delay_us(QDMA_DELAY);
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_used, list) {
+ list_del(&comp_temp->list);
+ dma_pool_free(comp_temp->virt_addr);
+ dma_pool_free(comp_temp->desc_virt_addr);
+ rte_free(comp_temp);
+ }
+
+ list_for_each_entry_safe(comp_temp, _comp_temp,
+ &fsl_queue->comp_free, list) {
+ list_del(&comp_temp->list);
+ dma_pool_free(comp_temp->virt_addr);
+ dma_pool_free(comp_temp->desc_virt_addr);
+ rte_free(comp_temp);
+ }
+
+finally:
+ fsl_qdma->desc_allocated--;
+}
+
+static struct fsl_qdma_queue *
+fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
+{
+ struct fsl_qdma_queue *queue_head, *queue_temp;
+ int len, i, j;
+ int queue_num;
+ int blocks;
+ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
+
+ queue_num = fsl_qdma->n_queues;
+ blocks = fsl_qdma->num_blocks;
+
+ len = sizeof(*queue_head) * queue_num * blocks;
+ queue_head = rte_zmalloc("qdma: queue head", len, 0);
+ if (!queue_head)
+ return NULL;
+
+ for (i = 0; i < FSL_QDMA_QUEUE_MAX; i++)
+ queue_size[i] = QDMA_QUEUE_SIZE;
+
+ for (j = 0; j < blocks; j++) {
+ for (i = 0; i < queue_num; i++) {
+ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ goto fail;
+ }
+ queue_temp = queue_head + i + (j * queue_num);
+
+ queue_temp->cq =
+ dma_pool_alloc(sizeof(struct fsl_qdma_format) *
+ queue_size[i],
+ sizeof(struct fsl_qdma_format) *
+ queue_size[i], &queue_temp->bus_addr);
+
+ if (!queue_temp->cq)
+ goto fail;
+
+ memset(queue_temp->cq, 0x0, queue_size[i] *
+ sizeof(struct fsl_qdma_format));
+
+ queue_temp->block_base = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ queue_temp->n_cq = queue_size[i];
+ queue_temp->id = i;
+ queue_temp->count = 0;
+ queue_temp->pending = 0;
+ queue_temp->virt_head = queue_temp->cq;
+
+ }
+ }
+ return queue_head;
+
+fail:
+ for (j = 0; j < blocks; j++) {
+ for (i = 0; i < queue_num; i++) {
+ queue_temp = queue_head + i + (j * queue_num);
+ dma_pool_free(queue_temp->cq);
+ }
+ }
+ rte_free(queue_head);
+
+ return NULL;
+}
+
+static struct fsl_qdma_queue *
+fsl_qdma_prep_status_queue(void)
+{
+ struct fsl_qdma_queue *status_head;
+ unsigned int status_size;
+
+ status_size = QDMA_STATUS_SIZE;
+ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
+ status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
+ return NULL;
+ }
+
+ status_head = rte_zmalloc("qdma: status head", sizeof(*status_head), 0);
+ if (!status_head)
+ return NULL;
+
+ /*
+ * Buffer for queue command
+ */
+ status_head->cq = dma_pool_alloc(sizeof(struct fsl_qdma_format) *
+ status_size,
+ sizeof(struct fsl_qdma_format) *
+ status_size,
+ &status_head->bus_addr);
+
+ if (!status_head->cq) {
+ rte_free(status_head);
+ return NULL;
+ }
+
+ memset(status_head->cq, 0x0, status_size *
+ sizeof(struct fsl_qdma_format));
+ status_head->n_cq = status_size;
+ status_head->virt_head = status_head->cq;
+
+ return status_head;
+}
+
+static int
+fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
+{
+ void *ctrl = fsl_qdma->ctrl_base;
+ void *block;
+ int i, count = RETRIES;
+ unsigned int j;
+ u32 reg;
+
+ /* Disable the command queue and wait for idle state. */
+ reg = qdma_readl(ctrl + FSL_QDMA_DMR);
+ reg |= FSL_QDMA_DMR_DQD;
+ qdma_writel(reg, ctrl + FSL_QDMA_DMR);
+ for (j = 0; j < fsl_qdma->num_blocks; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
+ qdma_writel(0, block + FSL_QDMA_BCQMR(i));
+ }
+ while (true) {
+ reg = qdma_readl(ctrl + FSL_QDMA_DSR);
+ if (!(reg & FSL_QDMA_DSR_DB))
+ break;
+ if (count-- < 0)
+ return -EBUSY;
+ rte_delay_us(100);
+ }
+
+ for (j = 0; j < fsl_qdma->num_blocks; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+
+ /* Disable status queue. */
+ qdma_writel(0, block + FSL_QDMA_BSQMR);
+
+ /*
+ * Clear the command queue interrupt detect register for
+ * all queues.
+ */
+ qdma_writel(0xffffffff, block + FSL_QDMA_BCQIDR(0));
+ }
+
+ return 0;
+}
+
+static int
+fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
+{
+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
+ struct fsl_qdma_queue *temp;
+ void *ctrl = fsl_qdma->ctrl_base;
+ void *block;
+ u32 i, j;
+ u32 reg;
+ int ret, val;
+
+ /* Try to halt the qDMA engine first. */
+ ret = fsl_qdma_halt(fsl_qdma);
+ if (ret)
+ return ret;
+
+ for (j = 0; j < fsl_qdma->num_blocks; j++) {
+ block = fsl_qdma->block_base +
+ FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
+ for (i = 0; i < fsl_qdma->n_queues; i++) {
+ temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+ /*
+ * Initialize the Command Queue registers to point to
+ * the first command descriptor in memory:
+ * - Dequeue Pointer Address Registers
+ * - Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(lower_32_bits(temp->bus_addr),
+ block + FSL_QDMA_BCQDPA_SADDR(i));
+ qdma_writel(upper_32_bits(temp->bus_addr),
+ block + FSL_QDMA_BCQEDPA_SADDR(i));
+ qdma_writel(lower_32_bits(temp->bus_addr),
+ block + FSL_QDMA_BCQEPA_SADDR(i));
+ qdma_writel(upper_32_bits(temp->bus_addr),
+ block + FSL_QDMA_BCQEEPA_SADDR(i));
+
+ /* Initialize the queue mode. */
+ reg = FSL_QDMA_BCQMR_EN;
+ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
+ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
+ qdma_writel(reg, block + FSL_QDMA_BCQMR(i));
+ }
+
+ /*
+ * Workaround for erratum ERR010812: XOFF must be enabled
+ * to avoid enqueue rejections. Set SQCCMR ENTER_WM to 0x20.
+ */
+
+ qdma_writel(FSL_QDMA_SQCCMR_ENTER_WM,
+ block + FSL_QDMA_SQCCMR);
+
+ /*
+ * Initialize the status queue registers to point to
+ * the first command descriptor in memory:
+ * - Dequeue Pointer Address Registers
+ * - Enqueue Pointer Address Registers
+ */
+
+ qdma_writel(
+ upper_32_bits(fsl_qdma->status[j]->bus_addr),
+ block + FSL_QDMA_SQEEPAR);
+ qdma_writel(
+ lower_32_bits(fsl_qdma->status[j]->bus_addr),
+ block + FSL_QDMA_SQEPAR);
+ qdma_writel(
+ upper_32_bits(fsl_qdma->status[j]->bus_addr),
+ block + FSL_QDMA_SQEDPAR);
+ qdma_writel(
+ lower_32_bits(fsl_qdma->status[j]->bus_addr),
+ block + FSL_QDMA_SQDPAR);
+ /* Disable the command and status queue interrupts. */
+
+ qdma_writel(0x0, block + FSL_QDMA_BCQIER(0));
+ qdma_writel(0x0, block + FSL_QDMA_BSQICR);
+ qdma_writel(0x0, block + FSL_QDMA_CQIER);
+
+ /* Initialize the status queue mode. */
+ reg = FSL_QDMA_BSQMR_EN;
+ val = ilog2(fsl_qdma->status[j]->n_cq) - 6;
+ reg |= FSL_QDMA_BSQMR_CQ_SIZE(val);
+ qdma_writel(reg, block + FSL_QDMA_BSQMR);
+ }
+
+ reg = qdma_readl(ctrl + FSL_QDMA_DMR);
+ reg &= ~FSL_QDMA_DMR_DQD;
+ qdma_writel(reg, ctrl + FSL_QDMA_DMR);
+
+ return 0;
+}
+
+static void
+dma_release(void *fsl_chan)
+{
+ ((struct fsl_qdma_chan *)fsl_chan)->free = true;
+ fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
+}
+
+static int
+dpaa_qdma_init(struct rte_dma_dev *dmadev)
+{
+ struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+ struct fsl_qdma_chan *fsl_chan;
+ uint64_t phys_addr;
+ unsigned int len;
+ int ccsr_qdma_fd;
+ int regs_size;
+ int ret;
+ u32 i;
+
+ fsl_qdma->desc_allocated = 0;
+ fsl_qdma->n_chans = VIRT_CHANNELS;
+ fsl_qdma->n_queues = QDMA_QUEUES;
+ fsl_qdma->num_blocks = QDMA_BLOCKS;
+ fsl_qdma->block_offset = QDMA_BLOCK_OFFSET;
+
+ len = sizeof(*fsl_chan) * fsl_qdma->n_chans;
+ fsl_qdma->chans = rte_zmalloc("qdma: fsl chans", len, 0);
+ if (!fsl_qdma->chans)
+ return -1;
+
+ len = sizeof(struct fsl_qdma_queue *) * fsl_qdma->num_blocks;
+ fsl_qdma->status = rte_zmalloc("qdma: fsl status", len, 0);
+ if (!fsl_qdma->status) {
+ rte_free(fsl_qdma->chans);
+ return -1;
+ }
+
+ for (i = 0; i < fsl_qdma->num_blocks; i++) {
+ rte_atomic32_init(&wait_task[i]);
+ fsl_qdma->status[i] = fsl_qdma_prep_status_queue();
+ if (!fsl_qdma->status[i])
+ goto err;
+ }
+
+ ccsr_qdma_fd = open("/dev/mem", O_RDWR);
+ if (unlikely(ccsr_qdma_fd < 0))
+ goto err;
+
+ regs_size = fsl_qdma->block_offset * (fsl_qdma->num_blocks + 2);
+ phys_addr = QDMA_CCSR_BASE;
+ fsl_qdma->ctrl_base = mmap(NULL, regs_size, PROT_READ |
+ PROT_WRITE, MAP_SHARED,
+ ccsr_qdma_fd, phys_addr);
+
+ close(ccsr_qdma_fd);
+ if (fsl_qdma->ctrl_base == MAP_FAILED)
+ goto err;
+
+ fsl_qdma->status_base = fsl_qdma->ctrl_base + QDMA_BLOCK_OFFSET;
+ fsl_qdma->block_base = fsl_qdma->status_base + QDMA_BLOCK_OFFSET;
+
+ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(fsl_qdma);
+ if (!fsl_qdma->queue) {
+ munmap(fsl_qdma->ctrl_base, regs_size);
+ goto err;
+ }
+
+ for (i = 0; i < fsl_qdma->n_chans; i++) {
+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+ fsl_chan->qdma = fsl_qdma;
+ fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
+ fsl_qdma->num_blocks);
+ fsl_chan->free = true;
+ }
+
+ ret = fsl_qdma_reg_init(fsl_qdma);
+ if (ret) {
+ munmap(fsl_qdma->ctrl_base, regs_size);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ rte_free(fsl_qdma->chans);
+ rte_free(fsl_qdma->status);
+
+ return -1;
+}
static int
dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
- __rte_unused struct rte_dpaa_device *dpaa_dev)
+ struct rte_dpaa_device *dpaa_dev)
{
+ struct rte_dma_dev *dmadev;
+ int ret;
+
+ dmadev = rte_dma_pmd_allocate(dpaa_dev->device.name,
+ rte_socket_id(),
+ sizeof(struct fsl_qdma_engine));
+ if (!dmadev)
+ return -EINVAL;
+
+ dpaa_dev->dmadev = dmadev;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa_qdma_init(dmadev);
+ if (ret) {
+ (void)rte_dma_pmd_release(dpaa_dev->device.name);
+ return ret;
+ }
+
+ dmadev->state = RTE_DMA_DEV_READY;
return 0;
}
static int
-dpaa_qdma_remove(__rte_unused struct rte_dpaa_device *dpaa_dev)
+dpaa_qdma_remove(struct rte_dpaa_device *dpaa_dev)
{
+ struct rte_dma_dev *dmadev = dpaa_dev->dmadev;
+ struct fsl_qdma_engine *fsl_qdma = dmadev->data->dev_private;
+ int i = 0, max = QDMA_QUEUES * QDMA_BLOCKS;
+
+ for (i = 0; i < max; i++) {
+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+ if (fsl_chan->free == false)
+ dma_release(fsl_chan);
+ }
+
+ rte_free(fsl_qdma->status);
+ rte_free(fsl_qdma->chans);
+
+ (void)rte_dma_pmd_release(dpaa_dev->device.name);
+
return 0;
}
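As a side note, fsl_qdma_reg_init() encodes each ring size into the
BCQMR/BSQMR CQ_SIZE field as log2(entries) - 6, which is why ilog2()
is open-coded above and why 64 descriptors is the minimum legal ring
size. A standalone sketch of that encoding, not part of this patch:

#include <assert.h>

static int
ilog2(int x)
{
	int log = 0;

	while (x >>= 1)
		log++;
	return log;
}

int
main(void)
{
	int n_cq = 64; /* QDMA_QUEUE_SIZE, see dpaa_qdma.h below */
	unsigned int cq_size = (ilog2(n_cq) - 6) << 16; /* BCQMR_CQ_SIZE */
	unsigned int cd_thld = (ilog2(n_cq) - 4) << 20; /* BCQMR_CD_THLD */

	/* A 64-entry ring encodes as 0; the threshold field as 2. */
	assert(cq_size == 0);
	assert(cd_thld == (2u << 20));
	return 0;
}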
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
new file mode 100644
index 0000000000..c05620b740
--- /dev/null
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 NXP
+ */
+
+#ifndef _DPAA_QDMA_H_
+#define _DPAA_QDMA_H_
+
+#include <rte_io.h>
+
+#define CORE_NUMBER 4
+#define RETRIES 5
+
+#define FSL_QDMA_DMR 0x0
+#define FSL_QDMA_DSR 0x4
+#define FSL_QDMA_DEIER 0xe00
+#define FSL_QDMA_DEDR 0xe04
+#define FSL_QDMA_DECFDW0R 0xe10
+#define FSL_QDMA_DECFDW1R 0xe14
+#define FSL_QDMA_DECFDW2R 0xe18
+#define FSL_QDMA_DECFDW3R 0xe1c
+#define FSL_QDMA_DECFQIDR 0xe30
+#define FSL_QDMA_DECBR 0xe34
+
+#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
+#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
+#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
+#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
+#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
+#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
+#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
+#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
+
+#define FSL_QDMA_SQEDPAR 0x808
+#define FSL_QDMA_SQDPAR 0x80c
+#define FSL_QDMA_SQEEPAR 0x810
+#define FSL_QDMA_SQEPAR 0x814
+#define FSL_QDMA_BSQMR 0x800
+#define FSL_QDMA_BSQSR 0x804
+#define FSL_QDMA_BSQICR 0x828
+#define FSL_QDMA_CQMR 0xa00
+#define FSL_QDMA_CQDSCR1 0xa08
+#define FSL_QDMA_CQDSCR2 0xa0c
+#define FSL_QDMA_CQIER 0xa10
+#define FSL_QDMA_CQEDR 0xa14
+#define FSL_QDMA_SQCCMR 0xa20
+
+#define FSL_QDMA_SQICR_ICEN
+
+#define FSL_QDMA_CQIDR_CQT 0xff000000
+#define FSL_QDMA_CQIDR_SQPE 0x800000
+#define FSL_QDMA_CQIDR_SQT 0x8000
+
+#define FSL_QDMA_BCQIER_CQTIE 0x8000
+#define FSL_QDMA_BCQIER_CQPEIE 0x800000
+#define FSL_QDMA_BSQICR_ICEN 0x80000000
+#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
+#define FSL_QDMA_CQIER_MEIE 0x80000000
+#define FSL_QDMA_CQIER_TEIE 0x1
+#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
+
+#define FSL_QDMA_QUEUE_MAX 8
+
+#define FSL_QDMA_BCQMR_EN 0x80000000
+#define FSL_QDMA_BCQMR_EI 0x40000000
+#define FSL_QDMA_BCQMR_EI_BE 0x40
+#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
+#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BCQSR_QF 0x10000
+#define FSL_QDMA_BCQSR_XOFF 0x1
+#define FSL_QDMA_BCQSR_QF_XOFF_BE 0x1000100
+
+#define FSL_QDMA_BSQMR_EN 0x80000000
+#define FSL_QDMA_BSQMR_DI 0x40000000
+#define FSL_QDMA_BSQMR_DI_BE 0x40
+#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
+
+#define FSL_QDMA_BSQSR_QE 0x20000
+#define FSL_QDMA_BSQSR_QE_BE 0x200
+#define FSL_QDMA_BSQSR_QF 0x10000
+
+#define FSL_QDMA_DMR_DQD 0x40000000
+#define FSL_QDMA_DSR_DB 0x80000000
+
+#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
+#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
+#define FSL_QDMA_QUEUE_NUM_MAX 8
+
+#define FSL_QDMA_CMD_RWTTYPE 0x4
+#define FSL_QDMA_CMD_LWC 0x2
+
+#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
+#define FSL_QDMA_CMD_NS_OFFSET 27
+#define FSL_QDMA_CMD_DQOS_OFFSET 24
+#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
+#define FSL_QDMA_CMD_DSEN_OFFSET 19
+#define FSL_QDMA_CMD_LWC_OFFSET 16
+
+#define QDMA_CCDF_STATUS 20
+#define QDMA_CCDF_OFFSET 20
+#define QDMA_CCDF_MASK GENMASK(28, 20)
+#define QDMA_CCDF_FOTMAT BIT(29)
+#define QDMA_CCDF_SER BIT(30)
+
+#define QDMA_SG_FIN BIT(30)
+#define QDMA_SG_EXT BIT(31)
+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
+
+#define QDMA_BIG_ENDIAN 1
+#define COMP_TIMEOUT 100000
+#define COMMAND_QUEUE_OVERFLLOW 10
+
+/* qdma engine attribute */
+#define QDMA_QUEUE_SIZE 64
+#define QDMA_STATUS_SIZE 64
+#define QDMA_CCSR_BASE 0x8380000
+#define VIRT_CHANNELS 32
+#define QDMA_BLOCK_OFFSET 0x10000
+#define QDMA_BLOCKS 4
+#define QDMA_QUEUES 8
+#define QDMA_DELAY 1000
+
+#ifdef QDMA_BIG_ENDIAN
+#define QDMA_IN(addr) be32_to_cpu(rte_read32(addr))
+#define QDMA_OUT(addr, val) rte_write32(be32_to_cpu(val), addr)
+#define QDMA_IN_BE(addr) rte_read32(addr)
+#define QDMA_OUT_BE(addr, val) rte_write32(val, addr)
+#else
+#define QDMA_IN(addr) rte_read32(addr)
+#define QDMA_OUT(addr, val) rte_write32(val, addr)
+#define QDMA_IN_BE(addr) be32_to_cpu(rte_read32(addr))
+#define QDMA_OUT_BE(addr, val) rte_write32(be32_to_cpu(val), addr)
+#endif
+
+#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
+ (((fsl_qdma_engine)->block_offset) * (x))
+
+typedef void (*dma_call_back)(void *params);
+
+/* qDMA Command Descriptor Formats */
+struct fsl_qdma_format {
+ __le32 status; /* ser, status */
+ __le32 cfg; /* format, offset */
+ union {
+ struct {
+ __le32 addr_lo; /* low 32-bits of 40-bit address */
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ u8 __reserved1[2];
+ u8 cfg8b_w1; /* dd, queue */
+ };
+ __le64 data;
+ };
+};
+
+/* qDMA Source Descriptor Format */
+struct fsl_qdma_sdf {
+ __le32 rev3;
+ __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
+ __le32 rev5;
+ __le32 cmd;
+};
+
+/* qDMA Destination Descriptor Format */
+struct fsl_qdma_ddf {
+ __le32 rev1;
+ __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
+ __le32 rev3;
+ __le32 cmd;
+};
+
+enum dma_status {
+ DMA_COMPLETE,
+ DMA_IN_PROGRESS,
+ DMA_IN_PREPAR,
+ DMA_PAUSED,
+ DMA_ERROR,
+};
+
+struct fsl_qdma_chan {
+ struct fsl_qdma_engine *qdma;
+ struct fsl_qdma_queue *queue;
+ bool free;
+ struct list_head list;
+};
+
+struct fsl_qdma_list {
+ struct list_head dma_list;
+};
+
+struct fsl_qdma_queue {
+ struct fsl_qdma_format *virt_head;
+ struct list_head comp_used;
+ struct list_head comp_free;
+ dma_addr_t bus_addr;
+ u32 n_cq;
+ u32 id;
+ u32 count;
+ u32 pending;
+ struct fsl_qdma_format *cq;
+ void *block_base;
+};
+
+struct fsl_qdma_comp {
+ dma_addr_t bus_addr;
+ dma_addr_t desc_bus_addr;
+ void *virt_addr;
+ int index;
+ void *desc_virt_addr;
+ struct fsl_qdma_chan *qchan;
+ dma_call_back call_back_func;
+ void *params;
+ struct list_head list;
+};
+
+struct fsl_qdma_engine {
+ int desc_allocated;
+ void *ctrl_base;
+ void *status_base;
+ void *block_base;
+ u32 n_chans;
+ u32 n_queues;
+ int error_irq;
+ struct fsl_qdma_queue *queue;
+ struct fsl_qdma_queue **status;
+ struct fsl_qdma_chan *chans;
+ u32 num_blocks;
+ u8 free_block_id;
+ u32 vchan_map[4];
+ int block_offset;
+};
+
+static rte_atomic32_t wait_task[CORE_NUMBER];
+
+#endif /* _DPAA_QDMA_H_ */
--
2.25.1
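The QDMA_IN()/QDMA_OUT() wrappers above exist because the qDMA
register block is big-endian while the host cores are little-endian;
be32_to_cpu() is assumed to come from the dpaa bus compat headers and
to behave like DPDK's rte_be_to_cpu_32(). A minimal sketch of
equivalent accessors built only on public DPDK helpers:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_io.h>

static inline uint32_t
qdma_reg_read(void *addr)
{
	/* The register holds big-endian data; swap to host order. */
	return rte_be_to_cpu_32(rte_read32(addr));
}

static inline void
qdma_reg_write(void *addr, uint32_t val)
{
	/* Swap the host-order value to big-endian before the MMIO store. */
	rte_write32(rte_cpu_to_be_32(val), addr);
}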