From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id A259FA0548;
	Fri, 24 Sep 2021 16:34:12 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 51F4441338;
	Fri, 24 Sep 2021 16:33:51 +0200 (CEST)
Received: from mga05.intel.com (mga05.intel.com [192.55.52.43])
 by mails.dpdk.org (Postfix) with ESMTP id 6CE8E4131B
 for <dev@dpdk.org>; Fri, 24 Sep 2021 16:33:47 +0200 (CEST)
X-IronPort-AV: E=McAfee;i="6200,9189,10116"; a="309640146"
X-IronPort-AV: E=Sophos;i="5.85,320,1624345200"; d="scan'208";a="309640146"
Received: from fmsmga006.fm.intel.com ([10.253.24.20])
 by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 24 Sep 2021 07:33:47 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.85,320,1624345200"; d="scan'208";a="703871457"
Received: from silpixa00401160.ir.intel.com ([10.55.129.96])
 by fmsmga006.fm.intel.com with ESMTP; 24 Sep 2021 07:33:45 -0700
From: Conor Walsh <conor.walsh@intel.com>
To: bruce.richardson@intel.com, fengchengwen@huawei.com, jerinj@marvell.com,
 kevin.laatz@intel.com
Cc: dev@dpdk.org,
	Conor Walsh <conor.walsh@intel.com>
Date: Fri, 24 Sep 2021 14:33:27 +0000
Message-Id: <20210924143335.1092300-5-conor.walsh@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20210924143335.1092300-1-conor.walsh@intel.com>
References: <20210827172550.1522362-1-conor.walsh@intel.com>
 <20210924143335.1092300-1-conor.walsh@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v5 04/12] dma/ioat: add configuration functions
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Add functions for device configuration. The info_get and close functions
are also included here: info_get can be used to check that configuration
was applied successfully, and close is used by the dmadev API when
releasing a configured device.
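
As an illustrative sketch only (not part of this patch), an application
could use the dmadev API to check the applied configuration and later
release the device along the following lines, assuming a hypothetical
probed device id stored in dev_id:

    #include <stdio.h>
    #include <rte_dmadev.h>

    struct rte_dma_info info;

    /* Query the device to confirm the configuration took effect. */
    if (rte_dma_info_get(dev_id, &info) == 0)
        printf("%s: %u of %u vchans configured\n",
                info.dev_name, info.nb_vchans, info.max_vchans);

    /* Release the device; this driver's close callback frees the
     * descriptor ring allocated during vchan setup.
     */
    rte_dma_close(dev_id);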

Signed-off-by: Conor Walsh <conor.walsh@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
 doc/guides/dmadevs/ioat.rst    |  33 +++++++
 drivers/dma/ioat/ioat_dmadev.c | 107 +++++++++++++++++++++++++++++++++
 2 files changed, 140 insertions(+)

diff --git a/doc/guides/dmadevs/ioat.rst b/doc/guides/dmadevs/ioat.rst
index 9ae1d8a2ad..b1f847d273 100644
--- a/doc/guides/dmadevs/ioat.rst
+++ b/doc/guides/dmadevs/ioat.rst
@@ -67,3 +67,36 @@ For example::
 Once probed successfully, the device will appear as a ``dmadev``, that is a
 "DMA device type" inside DPDK, and can be accessed using APIs from the
 ``rte_dmadev`` library.
+
+Using IOAT DMAdev Devices
+--------------------------
+
+IOAT devices are accessed from an application through the ``dmadev`` API.
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~~
+
+Refer to the :ref:`Device Configuration <dmadev_device_configuration>` and
+:ref:`Configuration of Virtual DMA Channels <dmadev_vchan_configuration>` sections
+of the dmadev library documentation for details on device configuration API usage.
+
+IOAT configuration requirements (see the sketch after this list):
+
+* The ring size (``nb_desc``) must be a power of two, between 64 and 4096.
+* Only one ``vchan`` is supported per device.
+* Silent mode is not supported.
+* The transfer direction must be set to ``RTE_DMA_DIR_MEM_TO_MEM``, as only memory-to-memory copies are supported.
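+
+A minimal configuration sketch, assuming a device has already been probed and
+its identifier stored in a hypothetical ``dev_id`` variable, could look as
+follows::
+
+   struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
+   struct rte_dma_vchan_conf vchan_conf = {
+           .direction = RTE_DMA_DIR_MEM_TO_MEM,
+           .nb_desc = 512, /* a power of two within the supported range */
+   };
+
+   if (rte_dma_configure(dev_id, &dev_conf) < 0 ||
+                   rte_dma_vchan_setup(dev_id, 0, &vchan_conf) < 0)
+           rte_panic("Failed to configure DMA device\n");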
diff --git a/drivers/dma/ioat/ioat_dmadev.c b/drivers/dma/ioat/ioat_dmadev.c
index b132283ba5..92c4e2b04f 100644
--- a/drivers/dma/ioat/ioat_dmadev.c
+++ b/drivers/dma/ioat/ioat_dmadev.c
@@ -12,9 +12,112 @@ static struct rte_pci_driver ioat_pmd_drv;
 
 RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);
 
+#define DESC_SZ sizeof(struct ioat_dma_hw_desc)
+
 #define IOAT_PMD_NAME dmadev_ioat
 #define IOAT_PMD_NAME_STR RTE_STR(IOAT_PMD_NAME)
 
+/* Configure a device. */
+static int
+ioat_dev_configure(struct rte_dma_dev *dev __rte_unused, const struct rte_dma_conf *dev_conf,
+		uint32_t conf_sz)
+{
+	if (sizeof(struct rte_dma_conf) != conf_sz)
+		return -EINVAL;
+
+	if (dev_conf->nb_vchans != 1)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Setup a virtual channel for IOAT, only 1 vchan is supported. */
+static int
+ioat_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan __rte_unused,
+		const struct rte_dma_vchan_conf *qconf, uint32_t qconf_sz)
+{
+	struct ioat_dmadev *ioat = dev->dev_private;
+	uint16_t max_desc = qconf->nb_desc;
+	int i;
+
+	if (sizeof(struct rte_dma_vchan_conf) != qconf_sz)
+		return -EINVAL;
+
+	ioat->qcfg = *qconf;
+
+	if (!rte_is_power_of_2(max_desc)) {
+		max_desc = rte_align32pow2(max_desc);
+		IOAT_PMD_DEBUG("DMA dev %u using %u descriptors", dev->data->dev_id, max_desc);
+		ioat->qcfg.nb_desc = max_desc;
+	}
+
+	/* In case we are reconfiguring a device, free any existing memory. */
+	rte_free(ioat->desc_ring);
+
+	ioat->desc_ring = rte_zmalloc(NULL, sizeof(*ioat->desc_ring) * max_desc, 0);
+	if (ioat->desc_ring == NULL)
+		return -ENOMEM;
+
+	ioat->ring_addr = rte_mem_virt2iova(ioat->desc_ring);
+
+	ioat->status_addr = rte_mem_virt2iova(ioat) + offsetof(struct ioat_dmadev, status);
+
+	/* Ensure all counters are reset, if reconfiguring/restarting device. */
+	ioat->next_read = 0;
+	ioat->next_write = 0;
+	ioat->last_write = 0;
+	ioat->offset = 0;
+	ioat->failure = 0;
+
+	/* Configure descriptor ring - each one points to next. */
+	for (i = 0; i < ioat->qcfg.nb_desc; i++) {
+		ioat->desc_ring[i].next = ioat->ring_addr +
+				(((i + 1) % ioat->qcfg.nb_desc) * DESC_SZ);
+	}
+
+	return 0;
+}
+
+/* Get device information. */
+static int
+ioat_dev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *info, uint32_t size)
+{
+	struct ioat_dmadev *ioat = dev->dev_private;
+	if (size < sizeof(*info))
+		return -EINVAL;
+	info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			RTE_DMA_CAPA_OPS_COPY |
+			RTE_DMA_CAPA_OPS_FILL;
+	if (ioat->version >= IOAT_VER_3_4)
+		info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS;
+	info->max_vchans = 1;
+	info->min_desc = 32;
+	info->max_desc = 4096;
+	return 0;
+}
+
+/* Close a configured device. */
+static int
+ioat_dev_close(struct rte_dma_dev *dev)
+{
+	struct ioat_dmadev *ioat;
+
+	if (!dev) {
+		IOAT_PMD_ERR("Invalid device");
+		return -EINVAL;
+	}
+
+	ioat = dev->dev_private;
+	if (!ioat) {
+		IOAT_PMD_ERR("Error getting dev_private");
+		return -EINVAL;
+	}
+
+	rte_free(ioat->desc_ring);
+
+	return 0;
+}
+
 /* Dump DMA device info. */
 static int
 ioat_dev_dump(const struct rte_dma_dev *dev, FILE *f)
@@ -79,7 +182,11 @@ static int
 ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
 {
 	static const struct rte_dma_dev_ops ioat_dmadev_ops = {
+		.dev_close = ioat_dev_close,
+		.dev_configure = ioat_dev_configure,
 		.dev_dump = ioat_dev_dump,
+		.dev_info_get = ioat_dev_info_get,
+		.vchan_setup = ioat_vchan_setup,
 	};
 
 	struct rte_dma_dev *dmadev = NULL;
-- 
2.25.1