From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id EE835A0093;
	Thu, 21 Apr 2022 14:31:18 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 1E9FC427F9;
	Thu, 21 Apr 2022 14:31:02 +0200 (CEST)
Received: from inva021.nxp.com (inva021.nxp.com [92.121.34.21])
 by mails.dpdk.org (Postfix) with ESMTP id 0FA1641140
 for <dev@dpdk.org>; Thu, 21 Apr 2022 14:30:59 +0200 (CEST)
Received: from inva021.nxp.com (localhost [127.0.0.1])
 by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id E21522007B7;
 Thu, 21 Apr 2022 14:30:58 +0200 (CEST)
Received: from aprdc01srsp001v.ap-rdc01.nxp.com
 (aprdc01srsp001v.ap-rdc01.nxp.com [165.114.16.16])
 by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id 89D812007CF;
 Thu, 21 Apr 2022 14:30:58 +0200 (CEST)
Received: from lsv03274.swis.in-blr01.nxp.com (lsv03274.swis.in-blr01.nxp.com
 [92.120.147.114])
 by aprdc01srsp001v.ap-rdc01.nxp.com (Postfix) with ESMTP id 8BA1718002F4;
 Thu, 21 Apr 2022 20:30:57 +0800 (+08)
From: nipun.gupta@nxp.com
To: dev@dpdk.org
Cc: thomas@monjalon.net, g.singh@nxp.com, hemant.agrawal@nxp.com,
 gakhil@marvell.com, Nipun Gupta <nipun.gupta@nxp.com>
Subject: [PATCH 3/6] dma/dpaa2: support basic operations
Date: Thu, 21 Apr 2022 18:00:51 +0530
Message-Id: <20220421123054.22442-4-nipun.gupta@nxp.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220421123054.22442-1-nipun.gupta@nxp.com>
References: <20220421123054.22442-1-nipun.gupta@nxp.com>
X-Virus-Scanned: ClamAV using ClamSMTP
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

From: Nipun Gupta <nipun.gupta@nxp.com>

This patch adds the basic DMA control operations: device capability
reporting, device configuration, virtual channel setup, and the
start, stop and close operations.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
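Note for reviewers (this text sits below the "---" marker and is
dropped by git am): the following is a minimal, hypothetical
application-side sketch of how the ops added here are reached through
the public rte_dma API. The dev_id, the single vchan and the
mem-to-mem direction are illustrative assumptions, not part of this
patch.

#include <rte_dmadev.h>

static int
setup_qdma(int16_t dev_id)
{
	struct rte_dma_info info;
	struct rte_dma_conf dev_conf = {0};
	struct rte_dma_vchan_conf vconf = {0};
	int ret;

	/* Served by dpaa2_qdma_info_get(): capabilities and limits. */
	ret = rte_dma_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* Served by dpaa2_qdma_configure(): allocates the vchan array. */
	dev_conf.nb_vchans = 1;
	ret = rte_dma_configure(dev_id, &dev_conf);
	if (ret < 0)
		return ret;

	/* Served by dpaa2_qdma_vchan_setup(): creates rings and pools. */
	vconf.direction = RTE_DMA_DIR_MEM_TO_MEM;
	vconf.nb_desc = info.max_desc;
	ret = rte_dma_vchan_setup(dev_id, 0, &vconf);
	if (ret < 0)
		return ret;

	/* Served by dpaa2_qdma_start(): device state moves to running. */
	return rte_dma_start(dev_id);
}
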
 drivers/dma/dpaa2/dpaa2_qdma.c | 197 +++++++++++++++++++++++++++++++++
 1 file changed, 197 insertions(+)

diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 9fa48ddfa4..785d8aea7b 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -6,6 +6,7 @@
 #include <rte_fslmc.h>
 #include <rte_dmadev.h>
 #include <rte_dmadev_pmd.h>
+#include <rte_kvargs.h>
 #include <mc/fsl_dpdmai.h>
 #include "dpaa2_qdma.h"
 #include "dpaa2_qdma_logs.h"
@@ -15,6 +16,185 @@ int dpaa2_qdma_logtype;
 uint32_t dpaa2_coherent_no_alloc_cache;
 uint32_t dpaa2_coherent_alloc_cache;
 
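+/*
+ * Report the device's static capabilities (all four transfer
+ * directions, silent mode, copy ops) and its vchan/descriptor limits.
+ */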
+static int
+dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
+		    struct rte_dma_info *dev_info,
+		    uint32_t info_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_MEM_TO_DEV |
+			     RTE_DMA_CAPA_DEV_TO_DEV |
+			     RTE_DMA_CAPA_DEV_TO_MEM |
+			     RTE_DMA_CAPA_SILENT |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
+	dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
+	dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
+
+	return 0;
+}
+
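+/*
+ * Device configuration: allocates one qdma_virt_queue per requested
+ * vchan. Only valid while the device is stopped.
+ */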
+static int
+dpaa2_qdma_configure(struct rte_dma_dev *dev,
+		     const struct rte_dma_conf *dev_conf,
+		     uint32_t conf_sz)
+{
+	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	RTE_SET_USED(conf_sz);
+
+	/* If the QDMA device is not in the stopped state, return -EBUSY */
+	if (qdma_dev->state == 1) {
+		DPAA2_QDMA_ERR(
+			"Device is in running state. Stop before config.");
+		return -EBUSY;
+	}
+
+	/* Allocate Virtual Queues */
+	snprintf(name, sizeof(name), "qdma_%d_vq", dev->data->dev_id);
+	qdma_dev->vqs = rte_malloc(name,
+			(sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
+			RTE_CACHE_LINE_SIZE);
+	if (!qdma_dev->vqs) {
+		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+		return -ENOMEM;
+	}
+	qdma_dev->num_vqs = dev_conf->nb_vchans;
+
+	return 0;
+}
+
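+/*
+ * Per-vchan setup: selects the FD format, creates a status ring when
+ * the HW queue is shared, and creates the FLE and job mempools.
+ */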
+static int
+dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		       const struct rte_dma_vchan_conf *conf,
+		       uint32_t conf_sz)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+	uint32_t pool_size;
+	char ring_name[32];
+	char pool_name[64];
+	int fd_long_format = 1;
+	int sg_enable = 0;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	RTE_SET_USED(conf_sz);
+
+	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
+		sg_enable = 1;
+
+	if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
+		fd_long_format = 0;
+
+	if (dev->data->dev_conf.enable_silent)
+		qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
+
+	if (sg_enable) {
+		if (qdma_dev->num_vqs != 1) {
+			DPAA2_QDMA_ERR(
+				"qDMA SG format only supports physical queue!");
+			return -ENODEV;
+		}
+		if (!fd_long_format) {
+			DPAA2_QDMA_ERR(
+				"qDMA SG format only supports long FD format!");
+			return -ENODEV;
+		}
+		pool_size = QDMA_FLE_SG_POOL_SIZE;
+	} else {
+		pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
+	}
+
+	if (qdma_dev->num_vqs == 1)
+		qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
+	else {
+		/* Allocate a Ring for Virtual Queue in VQ mode */
+		snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
+			 dev->data->dev_id, vchan);
+		qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
+			conf->nb_desc, rte_socket_id(), 0);
+		if (!qdma_dev->vqs[vchan].status_ring) {
+			DPAA2_QDMA_ERR("Status ring creation failed for vq");
+			return -rte_errno;
+		}
+	}
+
+	snprintf(pool_name, sizeof(pool_name),
+		"qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
+			conf->nb_desc, pool_size,
+			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
+			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+	if (!qdma_dev->vqs[vchan].fle_pool) {
+		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+		return -ENOMEM;
+	}
+
+	snprintf(pool_name, sizeof(pool_name),
+		"qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
+	qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
+			conf->nb_desc, pool_size,
+			QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
+			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+	if (!qdma_dev->vqs[vchan].job_pool) {
+		DPAA2_QDMA_ERR("qdma_job_pool create failed");
+		return -ENOMEM;
+	}
+
+	qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
+	qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
+
+	return 0;
+}
+
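+/* Mark the device as running; it must be stopped to reconfigure. */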
+static int
+dpaa2_qdma_start(struct rte_dma_dev *dev)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	qdma_dev->state = 1;
+
+	return 0;
+}
+
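+/* Mark the device as stopped, allowing reconfiguration. */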
+static int
+dpaa2_qdma_stop(struct rte_dma_dev *dev)
+{
+	struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
+	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+	DPAA2_QDMA_FUNC_TRACE();
+
+	qdma_dev->state = 0;
+
+	return 0;
+}
+
 static int
 dpaa2_qdma_reset(struct rte_dma_dev *dev)
 {
@@ -55,7 +235,24 @@ dpaa2_qdma_reset(struct rte_dma_dev *dev)
 	return 0;
 }
 
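+/* dev_close is implemented in terms of dpaa2_qdma_reset(). */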
+static int
+dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
+{
+	DPAA2_QDMA_FUNC_TRACE();
+
+	dpaa2_qdma_reset(dev);
+
+	return 0;
+}
+
 static struct rte_dma_dev_ops dpaa2_qdma_ops = {
+	.dev_info_get     = dpaa2_qdma_info_get,
+	.dev_configure    = dpaa2_qdma_configure,
+	.dev_start        = dpaa2_qdma_start,
+	.dev_stop         = dpaa2_qdma_stop,
+	.dev_close        = dpaa2_qdma_close,
+	.vchan_setup      = dpaa2_qdma_vchan_setup,
 };
 
 static int
-- 
2.17.1