From: nipun.gupta@nxp.com
To: dev@dpdk.org
Cc: thomas@monjalon.net, G.Singh@nxp.com, hemant.agrawal@nxp.com,
gakhil@marvell.com, Nipun Gupta <nipun.gupta@nxp.com>
Subject: [PATCH v3 3/6] dma/dpaa2: support basic operations
Date: Thu, 5 May 2022 14:35:19 +0530 [thread overview]
Message-ID: <20220505090522.9638-4-nipun.gupta@nxp.com> (raw)
In-Reply-To: <20220505090522.9638-1-nipun.gupta@nxp.com>
From: Nipun Gupta <nipun.gupta@nxp.com>
This patch supports basic DMA operations, which include
device capability reporting and channel setup.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/dma/dpaa2/dpaa2_qdma.c | 182 +++++++++++++++++++++++++++++++++
1 file changed, 182 insertions(+)
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 9fa48ddfa4..785d8aea7b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -6,6 +6,7 @@
#include <rte_fslmc.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
#include <mc/fsl_dpdmai.h>
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
@@ -15,6 +16,171 @@ int dpaa2_qdma_logtype;
uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;
+/*
+ * .dev_info_get callback for the DPAA2 qDMA dmadev.
+ *
+ * Reports static capabilities (mem<->mem, mem<->dev, dev<->dev, silent
+ * mode, copy op) and the vchan/descriptor limits. The values are the
+ * same for every device instance, so @dev and @info_sz are unused.
+ * Always returns 0.
+ */
+static int
+dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
+ struct rte_dma_info *dev_info,
+ uint32_t info_sz)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(info_sz);
+
+ dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+ RTE_DMA_CAPA_MEM_TO_DEV |
+ RTE_DMA_CAPA_DEV_TO_DEV |
+ RTE_DMA_CAPA_DEV_TO_MEM |
+ RTE_DMA_CAPA_SILENT |
+ RTE_DMA_CAPA_OPS_COPY;
+ dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+ dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
+ dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+
+ return 0;
+}
+
+/*
+ * .dev_configure callback: allocate the virtual-queue array for the
+ * requested number of vchans.
+ *
+ * Returns 0 on success, -EBUSY if the device is running, -ENOMEM on
+ * allocation failure.
+ */
+static int
+dpaa2_qdma_configure(struct rte_dma_dev *dev,
+		     const struct rte_dma_conf *dev_conf,
+		     uint32_t conf_sz)
+{
+	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	RTE_SET_USED(conf_sz);
+
+	/* In case QDMA device is not in stopped state, return -EBUSY */
+	if (qdma_dev->state == 1) {
+		DPAA2_QDMA_ERR(
+			"Device is in running state. Stop before config.");
+		return -EBUSY;
+	}
+
+	/* Reconfiguration: release any previously allocated vq array
+	 * (rte_free(NULL) is a no-op on first configure).
+	 */
+	rte_free(qdma_dev->vqs);
+
+	/* Allocate Virtual Queues; snprintf() bounds the memzone name */
+	snprintf(name, sizeof(name), "qdma_%d_vq", dev->data->dev_id);
+	qdma_dev->vqs = rte_malloc(name,
+			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+			RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+		return -ENOMEM;
+	}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+
+	return 0;
+}
+
+/*
+ * .vchan_setup callback: configure one virtual channel.
+ *
+ * Honors FD-format flags previously set on the vq (SG / short FD) and
+ * the device-level silent-mode setting. In multi-vq mode a status ring
+ * is created; an FLE pool and a job pool are created per vchan.
+ *
+ * Returns 0 on success; -ENODEV for unsupported SG configurations,
+ * -rte_errno if ring creation fails, -ENOMEM if a pool cannot be
+ * created. All partially created resources are released on failure.
+ */
+static int
+dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		       const struct rte_dma_vchan_conf *conf,
+		       uint32_t conf_sz)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint32_t pool_size;
+	char ring_name[32];
+	char pool_name[64];
+	int fd_long_format = 1;
+	int sg_enable = 0;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	RTE_SET_USED(conf_sz);
+
+	/* FD format flags are presumably set earlier via the PMD-private
+	 * API before vchan setup -- TODO confirm against callers.
+	 */
+	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
+		sg_enable = 1;
+
+	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
+		fd_long_format = 0;
+
+	if (dev->data->dev_conf.enable_silent)
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
+
+	if (sg_enable) {
+		if (qdma_dev->num_vqs != 1) {
+			DPAA2_QDMA_ERR(
+				"qDMA SG format only supports physical queue!");
+			return -ENODEV;
+		}
+		if (!fd_long_format) {
+			DPAA2_QDMA_ERR(
+				"qDMA SG format only supports long FD format!");
+			return -ENODEV;
+		}
+		pool_size = QDMA_FLE_SG_POOL_SIZE;
+	} else {
+		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
+	}
+
+	if (qdma_dev->num_vqs == 1)
+		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
+	else {
+		/* Allocate a Ring for Virtual Queue in VQ mode */
+		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
+			 dev->data->dev_id, vchan);
+		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
+			conf->nb_desc, rte_socket_id(), 0);
+		if (!qdma_dev->vqs[vchan].status_ring) {
+			DPAA2_QDMA_ERR("Status ring creation failed for vq");
+			/* rte_ring_create() sets a positive rte_errno;
+			 * dmadev ops must return a negative errno.
+			 */
+			return -rte_errno;
+		}
+	}
+
+	snprintf(pool_name, sizeof(pool_name),
+		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
+			conf->nb_desc, pool_size,
+			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
+			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+	if (!qdma_dev->vqs[vchan].fle_pool) {
+		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+		goto err_free_ring;
+	}
+
+	snprintf(pool_name, sizeof(pool_name),
+		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
+			conf->nb_desc, pool_size,
+			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
+			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+	if (!qdma_dev->vqs[vchan].job_pool) {
+		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		goto err_free_fle_pool;
+	}
+
+	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
+	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
+
+	return 0;
+
+err_free_fle_pool:
+	rte_mempool_free(qdma_dev->vqs[vchan].fle_pool);
+	qdma_dev->vqs[vchan].fle_pool = NULL;
+err_free_ring:
+	/* rte_ring_free(NULL) is a no-op (exclusive HW-queue case) */
+	rte_ring_free(qdma_dev->vqs[vchan].status_ring);
+	qdma_dev->vqs[vchan].status_ring = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * .dev_start callback: mark the qDMA device as running (state = 1),
+ * which makes dpaa2_qdma_configure() refuse further reconfiguration.
+ * Always returns 0.
+ */
+static int
+dpaa2_qdma_start(struct rte_dma_dev *dev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev->state = 1;
+
+ return 0;
+}
+
+/*
+ * .dev_stop callback: mark the qDMA device as stopped (state = 0),
+ * re-enabling configuration via dpaa2_qdma_configure().
+ * Always returns 0.
+ */
+static int
+dpaa2_qdma_stop(struct rte_dma_dev *dev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev->state = 0;
+
+ return 0;
+}
+
static int
dpaa2_qdma_reset(struct rte_dma_dev *dev)
{
@@ -55,7 +221,23 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
return 0;
}
+/*
+ * .dev_close callback: delegate to dpaa2_qdma_reset() to release the
+ * device's queue resources and clear its configuration.
+ *
+ * Note: @dev is actually used here (passed to reset), so the original
+ * __rte_unused marker was misleading and has been dropped; the reset
+ * status is propagated instead of being discarded.
+ */
+static int
+dpaa2_qdma_close(struct rte_dma_dev *dev)
+{
+	DPAA2_QDMA_FUNC_TRACE();
+
+	return dpaa2_qdma_reset(dev);
+}
+
static struct rte_dma_dev_ops dpaa2_qdma_ops = {
+	/* Control-path callbacks registered with the dmadev framework */
+ .dev_info_get = dpaa2_qdma_info_get,
+ .dev_configure = dpaa2_qdma_configure,
+ .dev_start = dpaa2_qdma_start,
+ .dev_stop = dpaa2_qdma_stop,
+ .dev_close = dpaa2_qdma_close,
+ .vchan_setup = dpaa2_qdma_vchan_setup,
};
static int
--
2.17.1
next prev parent reply other threads:[~2022-05-05 9:05 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-21 12:30 [PATCH 0/6] move DPAA2 QDMA driver freom raw to dma nipun.gupta
2022-04-21 12:30 ` [PATCH 1/6] raw/dpaa2_qdma: remove dpaa2 QDMA driver from raw nipun.gupta
2022-04-21 12:30 ` [PATCH 2/6] dma/dpaa2: introduce DPAA2 DMA driver skeleton nipun.gupta
2022-04-21 12:30 ` [PATCH 3/6] dma/dpaa2: support basic operations nipun.gupta
2022-04-21 12:30 ` [PATCH 4/6] dma/dpaa2: add PMD apis for additional configuration nipun.gupta
2022-04-21 12:30 ` [PATCH 5/6] dma/dpaa2: support DMA operations nipun.gupta
2022-04-21 12:30 ` [PATCH 6/6] dma/dpaa2: support statistics nipun.gupta
2022-05-05 7:31 ` [PATCH v2 0/6] move DPAA2 QDMA driver freom raw to dma nipun.gupta
2022-05-05 7:31 ` [PATCH v2 1/6] raw/dpaa2_qdma: remove dpaa2 QDMA driver from raw nipun.gupta
2022-05-05 7:31 ` [PATCH v2 2/6] dma/dpaa2: introduce DPAA2 DMA driver skeleton nipun.gupta
2022-05-05 7:31 ` [PATCH v2 3/6] dma/dpaa2: support basic operations nipun.gupta
2022-05-05 7:31 ` [PATCH v2 4/6] dma/dpaa2: add PMD apis for additional configuration nipun.gupta
2022-05-05 7:31 ` [PATCH v2 5/6] dma/dpaa2: support DMA operations nipun.gupta
2022-05-05 7:31 ` [PATCH v2 6/6] dma/dpaa2: support statistics nipun.gupta
2022-05-05 9:05 ` [PATCH v3 0/6] move DPAA2 QDMA driver freom raw to dma nipun.gupta
2022-05-05 9:05 ` [PATCH v3 1/6] raw/dpaa2_qdma: remove dpaa2 QDMA driver from raw nipun.gupta
2022-05-05 9:05 ` [PATCH v3 2/6] dma/dpaa2: introduce DPAA2 DMA driver skeleton nipun.gupta
2022-05-05 9:05 ` nipun.gupta [this message]
2022-05-05 9:05 ` [PATCH v3 4/6] dma/dpaa2: add PMD apis for additional configuration nipun.gupta
2022-05-05 9:05 ` [PATCH v3 5/6] dma/dpaa2: support DMA operations nipun.gupta
2022-05-05 9:05 ` [PATCH v3 6/6] dma/dpaa2: support statistics nipun.gupta
2022-05-26 6:00 ` [PATCH v3 0/6] move DPAA2 QDMA driver freom raw to dma Hemant Agrawal
2022-05-31 15:29 ` Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220505090522.9638-4-nipun.gupta@nxp.com \
--to=nipun.gupta@nxp.com \
--cc=G.Singh@nxp.com \
--cc=dev@dpdk.org \
--cc=gakhil@marvell.com \
--cc=hemant.agrawal@nxp.com \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).