From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v2 04/30] dma/dpaa2: multiple process support
Date: Mon, 22 Jul 2024 17:28:17 +0530
Message-ID: <20240722115843.1830105-5-g.singh@nxp.com>
In-Reply-To: <20240722115843.1830105-1-g.singh@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
Support multiple processes for dpaa2 dma:
1) Move the queue configuration procedure from the init function to
the device configuration function, which is called by the user.
2) Allocate the dpaa2_dpdmai_dev and qdma_device instances in the
primary process and share them between processes.
3) Map the MC register region per process.
4) The user is responsible for checking the number of configured
virtual queues before using a dma device, to identify whether the
device is already occupied by another process (a usage sketch
follows the diffstat below).
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/dma/dpaa2/dpaa2_qdma.c | 405 ++++++++++++++++++++-------------
drivers/dma/dpaa2/dpaa2_qdma.h | 6 +-
2 files changed, 254 insertions(+), 157 deletions(-)
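
A minimal usage sketch (not part of the patch) of how an application
could follow point 4 of the commit message: a secondary process reads
rte_dma_info.nb_vchans to see whether the shared device was already
configured by another process before reconfiguring it. The helper
name, the single-vchan configuration and the error handling are
illustrative assumptions only.

/*
 * Sketch: check whether the shared DMA device is already in use by
 * another process before (re)configuring it.
 */
#include <errno.h>
#include <rte_eal.h>
#include <rte_dmadev.h>

static int
attach_qdma(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };

	if (rte_dma_info_get(dev_id, &info) != 0)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
	    info.nb_vchans != 0) {
		/* Another process already configured (and may be using)
		 * this device; do not reconfigure it from here.
		 */
		return -EBUSY;
	}

	if (rte_dma_configure(dev_id, &conf) != 0)
		return -1;

	/* rte_dma_vchan_setup() and rte_dma_start() would follow here. */
	return 0;
}
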
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 15d3776603..44b82c139e 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
*/
#include <rte_eal.h>
@@ -19,6 +19,8 @@
static uint32_t dpaa2_coherent_no_alloc_cache;
static uint32_t dpaa2_coherent_alloc_cache;
+static struct fsl_mc_io s_proc_mc_reg;
+
static inline int
qdma_cntx_idx_ring_eq(struct qdma_cntx_idx_ring *ring,
const uint16_t *elem, uint16_t nb,
@@ -960,6 +962,9 @@ dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
dev_info->max_vchans = dpdmai_dev->num_queues;
dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+ dev_info->dev_name = dev->device->name;
+ if (dpdmai_dev->qdma_dev)
+ dev_info->nb_vchans = dpdmai_dev->qdma_dev->num_vqs;
return 0;
}
@@ -969,25 +974,102 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
const struct rte_dma_conf *dev_conf,
uint32_t conf_sz)
{
- char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
uint16_t i;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ struct dpaa2_queue *rxq;
+ int ret = 0;
DPAA2_QDMA_FUNC_TRACE();
RTE_SET_USED(conf_sz);
- /* In case QDMA device is not in stopped state, return -EBUSY */
- if (qdma_dev->state == 1) {
- DPAA2_QDMA_ERR("%s Not stopped, configure failed.",
- dev->data->dev_name);
- return -EBUSY;
+ if (dev_conf->nb_vchans > dpdmai_dev->num_queues) {
+ DPAA2_QDMA_ERR("%s config queues(%d) > hw queues(%d)",
+ dev->data->dev_name, dev_conf->nb_vchans,
+ dpdmai_dev->num_queues);
+
+ return -ENOTSUP;
+ }
+
+ if (qdma_dev->vqs) {
+ DPAA2_QDMA_DEBUG("%s: queues de-config(%d)/re-config(%d)",
+ dev->data->dev_name,
+ qdma_dev->num_vqs, dev_conf->nb_vchans);
+ for (i = 0; i < qdma_dev->num_vqs; i++) {
+ if ((qdma_dev->vqs[i].num_enqueues !=
+ qdma_dev->vqs[i].num_dequeues) &&
+ !qdma_dev->is_silent) {
+ DPAA2_QDMA_ERR("VQ(%d) %"PRIu64" jobs in dma.",
+ i, qdma_dev->vqs[i].num_enqueues -
+ qdma_dev->vqs[i].num_dequeues);
+ return -EBUSY;
+ }
+ }
+ for (i = 0; i < qdma_dev->num_vqs; i++) {
+ if (qdma_dev->vqs[i].fle_pool) {
+ rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+ qdma_dev->vqs[i].fle_pool = NULL;
+ }
+ if (qdma_dev->vqs[i].ring_cntx_idx) {
+ rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+ qdma_dev->vqs[i].ring_cntx_idx = NULL;
+ }
+ rxq = &dpdmai_dev->rx_queue[i];
+ if (rxq->q_storage) {
+ DPAA2_QDMA_DEBUG("%s rxq[%d] re-configure",
+ dev->data->dev_name, i);
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ rxq->q_storage = NULL;
+ }
+ }
+ rte_free(qdma_dev->vqs);
+ qdma_dev->vqs = NULL;
+ qdma_dev->num_vqs = 0;
+ }
+
+ /* Set up Rx Queues */
+ for (i = 0; i < dev_conf->nb_vchans; i++) {
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ rxq = &dpdmai_dev->rx_queue[i];
+ ret = dpdmai_set_rx_queue(&s_proc_mc_reg,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, 0, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("%s RXQ%d set failed(%d)",
+ dev->data->dev_name, i, ret);
+ return ret;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < dev_conf->nb_vchans; i++) {
+ ret = dpdmai_get_rx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token, i, 0, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
+ dpdmai_dev->dpdmai_id, i, ret);
+ return ret;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token, i, 0, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
+ dpdmai_dev->dpdmai_id, i, ret);
+ return ret;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
}
/* Allocate Virtual Queues */
- sprintf(name, "qdma_%d_vq", dev->data->dev_id);
- qdma_dev->vqs = rte_malloc(name,
+ qdma_dev->vqs = rte_zmalloc(NULL,
(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
RTE_CACHE_LINE_SIZE);
if (!qdma_dev->vqs) {
@@ -995,13 +1077,50 @@ dpaa2_qdma_configure(struct rte_dma_dev *dev,
dev->data->dev_name, dev_conf->nb_vchans);
return -ENOMEM;
}
- for (i = 0; i < dev_conf->nb_vchans; i++)
+ for (i = 0; i < dev_conf->nb_vchans; i++) {
qdma_dev->vqs[i].vq_id = i;
+ rxq = &dpdmai_dev->rx_queue[i];
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq->q_storage = rte_zmalloc(NULL,
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("%s Q[%d] storage alloc failed",
+ dev->data->dev_name, i);
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("%s Q[%d] dq storage alloc failed",
+ dev->data->dev_name, i);
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+ }
qdma_dev->num_vqs = dev_conf->nb_vchans;
qdma_dev->is_silent = dev_conf->enable_silent;
return 0;
+
+alloc_failed:
+ for (i = 0; i < dev_conf->nb_vchans; i++) {
+ rxq = &dpdmai_dev->rx_queue[i];
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ rxq->q_storage = NULL;
+ }
+ }
+
+ rte_free(qdma_dev->vqs);
+ qdma_dev->vqs = NULL;
+ qdma_dev->num_vqs = 0;
+
+ return ret;
}
static int
@@ -1130,11 +1249,17 @@ static int
dpaa2_qdma_start(struct rte_dma_dev *dev)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
- struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ int ret;
DPAA2_QDMA_FUNC_TRACE();
- qdma_dev->state = 1;
+ /* Enable the device */
+ ret = dpdmai_enable(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ return ret;
+ }
return 0;
}
@@ -1143,30 +1268,33 @@ static int
dpaa2_qdma_stop(struct rte_dma_dev *dev)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
- struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ int ret;
DPAA2_QDMA_FUNC_TRACE();
- qdma_dev->state = 0;
+ /* Disable the device */
+ ret = dpdmai_disable(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Disable device failed with err: %d", ret);
+ return ret;
+ }
return 0;
}
static int
-dpaa2_qdma_reset(struct rte_dma_dev *dev)
+dpaa2_qdma_close(struct rte_dma_dev *dev)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ struct dpaa2_queue *rxq;
int i;
DPAA2_QDMA_FUNC_TRACE();
- /* In case QDMA device is not in stopped state, return -EBUSY */
- if (qdma_dev->state == 1) {
- DPAA2_QDMA_ERR("%s Not stopped, reset failed.",
- dev->data->dev_name);
- return -EBUSY;
- }
+ if (!qdma_dev)
+ return 0;
/* In case there are pending jobs on any VQ, return -EBUSY */
for (i = 0; i < qdma_dev->num_vqs; i++) {
@@ -1180,8 +1308,31 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
}
}
- rte_free(qdma_dev->vqs);
- qdma_dev->vqs = NULL;
+ /* Free RXQ storages */
+ for (i = 0; i < qdma_dev->num_vqs; i++) {
+ rxq = &dpdmai_dev->rx_queue[i];
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ rxq->q_storage = NULL;
+ }
+ }
+
+ if (qdma_dev->vqs) {
+ /* Free RXQ fle pool */
+ for (i = 0; i < qdma_dev->num_vqs; i++) {
+ if (qdma_dev->vqs[i].fle_pool) {
+ rte_mempool_free(qdma_dev->vqs[i].fle_pool);
+ qdma_dev->vqs[i].fle_pool = NULL;
+ }
+ if (qdma_dev->vqs[i].ring_cntx_idx) {
+ rte_free(qdma_dev->vqs[i].ring_cntx_idx);
+ qdma_dev->vqs[i].ring_cntx_idx = NULL;
+ }
+ }
+ rte_free(qdma_dev->vqs);
+ qdma_dev->vqs = NULL;
+ }
/* Reset QDMA device structure */
qdma_dev->num_vqs = 0;
@@ -1190,18 +1341,8 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
}
static int
-dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
-{
- DPAA2_QDMA_FUNC_TRACE();
-
- dpaa2_qdma_reset(dev);
-
- return 0;
-}
-
-static int
-dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev, uint16_t vchan,
- struct rte_dma_stats *rte_stats, uint32_t size)
+dpaa2_qdma_stats_get(const struct rte_dma_dev *dmadev,
+ uint16_t vchan, struct rte_dma_stats *rte_stats, uint32_t size)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dmadev->data->dev_private;
struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
@@ -1256,56 +1397,97 @@ static int
dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
- struct dpaa2_queue *rxq;
- int ret, i;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ int ret;
DPAA2_QDMA_FUNC_TRACE();
- ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token);
- if (ret) {
- DPAA2_QDMA_ERR("dpdmai(%d) disable failed",
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ DPAA2_QDMA_DEBUG("Un-attach DMA(%d) in the 2nd proess.",
dpdmai_dev->dpdmai_id);
- }
-
- /* Set up the DQRR storage for Rx */
- for (i = 0; i < dpdmai_dev->num_queues; i++) {
- rxq = &dpdmai_dev->rx_queue[i];
- if (rxq->q_storage) {
- dpaa2_free_dq_storage(rxq->q_storage);
- rte_free(rxq->q_storage);
- }
+ return 0;
}
/* Close the device at underlying layer*/
- ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ ret = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token);
if (ret) {
- DPAA2_QDMA_ERR("dpdmai(%d) close failed",
- dpdmai_dev->dpdmai_id);
+ DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+ dpdmai_dev->dpdmai_id, ret);
+
+ return ret;
+ }
+
+ if (qdma_dev) {
+ rte_free(qdma_dev);
+ dpdmai_dev->qdma_dev = NULL;
}
return ret;
}
static int
-dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
+dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, uint32_t dpdmai_id)
{
struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
- struct dpdmai_rx_queue_cfg rx_queue_cfg;
struct dpdmai_attr attr;
- struct dpdmai_rx_queue_attr rx_attr;
- struct dpdmai_tx_queue_attr tx_attr;
- struct dpaa2_queue *rxq;
- int ret, i;
+ int ret, err;
DPAA2_QDMA_FUNC_TRACE();
+ if (!dpaa2_coherent_no_alloc_cache) {
+ if (dpaa2_svr_family == SVR_LX2160A) {
+ dpaa2_coherent_no_alloc_cache =
+ DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
+ dpaa2_coherent_alloc_cache =
+ DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
+ } else {
+ dpaa2_coherent_no_alloc_cache =
+ DPAA2_COHERENT_NO_ALLOCATE_CACHE;
+ dpaa2_coherent_alloc_cache =
+ DPAA2_COHERENT_ALLOCATE_CACHE;
+ }
+ }
+
+ if (!s_proc_mc_reg.regs)
+ s_proc_mc_reg.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ DPAA2_QDMA_DEBUG("Attach DMA(%d) in the 2nd proess.",
+ dpdmai_id);
+ if (dpdmai_id != dpdmai_dev->dpdmai_id) {
+ DPAA2_QDMA_ERR("Fatal: Attach DMA(%d) to DMA(%d)",
+ dpdmai_id, dpdmai_dev->dpdmai_id);
+ return -EINVAL;
+ }
+ if (!dpdmai_dev->qdma_dev) {
+ DPAA2_QDMA_ERR("Fatal: DMA(%d) qdma_dev NOT allocated",
+ dpdmai_id);
+ return -ENOMEM;
+ }
+ if (dpdmai_dev->qdma_dev->num_vqs) {
+ DPAA2_QDMA_WARN("DMA(%d) %d vqs were configured",
+ dpdmai_id, dpdmai_dev->qdma_dev->num_vqs);
+ }
+
+ return 0;
+ }
+
/* Open DPDMAI device */
dpdmai_dev->dpdmai_id = dpdmai_id;
- dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
- dpdmai_dev->qdma_dev = rte_malloc(NULL,
+
+ if (dpdmai_dev->qdma_dev) {
+ rte_free(dpdmai_dev->qdma_dev);
+ dpdmai_dev->qdma_dev = NULL;
+ }
+ dpdmai_dev->qdma_dev = rte_zmalloc(NULL,
sizeof(struct qdma_device), RTE_CACHE_LINE_SIZE);
- ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ if (!dpdmai_dev->qdma_dev) {
+ DPAA2_QDMA_ERR("DMA(%d) alloc memory failed",
+ dpdmai_id);
+ return -ENOMEM;
+ }
+ ret = dpdmai_open(&s_proc_mc_reg, CMD_PRI_LOW,
dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
if (ret) {
DPAA2_QDMA_ERR("%s: dma(%d) open failed(%d)",
@@ -1314,105 +1496,24 @@ dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
}
/* Get DPDMAI attributes */
- ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ ret = dpdmai_get_attributes(&s_proc_mc_reg, CMD_PRI_LOW,
dpdmai_dev->token, &attr);
if (ret) {
DPAA2_QDMA_ERR("%s: dma(%d) get attributes failed(%d)",
__func__, dpdmai_dev->dpdmai_id, ret);
- goto init_err;
- }
- dpdmai_dev->num_queues = attr.num_of_queues;
-
- /* Set up Rx Queues */
- for (i = 0; i < dpdmai_dev->num_queues; i++) {
- memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
- ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
- CMD_PRI_LOW,
- dpdmai_dev->token,
- i, 0, &rx_queue_cfg);
- if (ret) {
- DPAA2_QDMA_ERR("%s Q%d set failed(%d)",
- dev->data->dev_name, i, ret);
- goto init_err;
- }
-
- /* Allocate DQ storage for the DPDMAI Rx queues */
- rxq = &dpdmai_dev->rx_queue[i];
- rxq->q_storage = rte_malloc("dq_storage",
- sizeof(struct queue_storage_info_t),
- RTE_CACHE_LINE_SIZE);
- if (!rxq->q_storage) {
- DPAA2_QDMA_ERR("%s DQ info(Q%d) alloc failed",
- dev->data->dev_name, i);
- ret = -ENOMEM;
- goto init_err;
- }
-
- memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
- ret = dpaa2_alloc_dq_storage(rxq->q_storage);
- if (ret) {
- DPAA2_QDMA_ERR("%s DQ storage(Q%d) alloc failed(%d)",
- dev->data->dev_name, i, ret);
- goto init_err;
- }
- }
-
- /* Get Rx and Tx queues FQID's */
- for (i = 0; i < dpdmai_dev->num_queues; i++) {
- ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token, i, 0, &rx_attr);
- if (ret) {
- DPAA2_QDMA_ERR("Get DPDMAI%d-RXQ%d failed(%d)",
- dpdmai_dev->dpdmai_id, i, ret);
- goto init_err;
- }
- dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
-
- ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token, i, 0, &tx_attr);
- if (ret) {
- DPAA2_QDMA_ERR("Get DPDMAI%d-TXQ%d failed(%d)",
- dpdmai_dev->dpdmai_id, i, ret);
- goto init_err;
- }
- dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
- }
-
- /* Enable the device */
- ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
- dpdmai_dev->token);
- if (ret) {
- DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
- goto init_err;
- }
-
- if (!dpaa2_coherent_no_alloc_cache) {
- if (dpaa2_svr_family == SVR_LX2160A) {
- dpaa2_coherent_no_alloc_cache =
- DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
- dpaa2_coherent_alloc_cache =
- DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
- } else {
- dpaa2_coherent_no_alloc_cache =
- DPAA2_COHERENT_NO_ALLOCATE_CACHE;
- dpaa2_coherent_alloc_cache =
- DPAA2_COHERENT_ALLOCATE_CACHE;
+ err = dpdmai_close(&s_proc_mc_reg, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (err) {
+ DPAA2_QDMA_ERR("dpdmai(%d) close failed(%d)",
+ dpdmai_dev->dpdmai_id, err);
}
+ return ret;
}
+ dpdmai_dev->num_queues = attr.num_of_queues;
- DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
-
- /* Reset the QDMA device */
- ret = dpaa2_qdma_reset(dev);
- if (ret) {
- DPAA2_QDMA_ERR("Resetting QDMA failed");
- goto init_err;
- }
+ DPAA2_QDMA_DEBUG("DMA(%d) is initialized.", dpdmai_id);
return 0;
-init_err:
- dpaa2_dpdmai_dev_uninit(dev);
- return ret;
}
static int
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.h b/drivers/dma/dpaa2/dpaa2_qdma.h
index ee34532408..743a43fa14 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.h
+++ b/drivers/dma/dpaa2/dpaa2_qdma.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2022 NXP
+ * Copyright 2018-2023 NXP
*/
#ifndef _DPAA2_QDMA_H_
@@ -200,8 +200,6 @@ struct qdma_cntx_long {
struct dpaa2_dpdmai_dev {
/** Pointer to Next device instance */
TAILQ_ENTRY(dpaa2_qdma_device) next;
- /** handle to DPDMAI object */
- struct fsl_mc_io dpdmai;
/** HW ID for DPDMAI object */
uint32_t dpdmai_id;
/** Tocken of this device */
@@ -267,8 +265,6 @@ struct qdma_device {
struct qdma_virt_queue *vqs;
/** Total number of VQ's */
uint16_t num_vqs;
- /** Device state - started or stopped */
- uint8_t state;
uint8_t is_silent;
};
--
2.25.1